1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/oom.h>
35 #include <linux/shmem_fs.h>
36 #include <linux/slab.h>
37 #include <linux/swap.h>
38 #include <linux/pci.h>
39 #include <linux/dma-buf.h>
40
41 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
42 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
43                                                    bool force);
44 static __must_check int
45 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
46                                bool readonly);
47 static void
48 i915_gem_object_retire(struct drm_i915_gem_object *obj);
49
50 static void i915_gem_write_fence(struct drm_device *dev, int reg,
51                                  struct drm_i915_gem_object *obj);
52 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
53                                          struct drm_i915_fence_reg *fence,
54                                          bool enable);
55
56 static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
57                                              struct shrink_control *sc);
58 static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
59                                             struct shrink_control *sc);
60 static int i915_gem_shrinker_oom(struct notifier_block *nb,
61                                  unsigned long event,
62                                  void *ptr);
63 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
64
65 static bool cpu_cache_is_coherent(struct drm_device *dev,
66                                   enum i915_cache_level level)
67 {
68         return HAS_LLC(dev) || level != I915_CACHE_NONE;
69 }
70
71 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
72 {
73         if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
74                 return true;
75
76         return obj->pin_display;
77 }
78
79 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
80 {
81         if (obj->tiling_mode)
82                 i915_gem_release_mmap(obj);
83
84         /* As we do not have an associated fence register, we will force
85          * a tiling change if we ever need to acquire one.
86          */
87         obj->fence_dirty = false;
88         obj->fence_reg = I915_FENCE_REG_NONE;
89 }
90
91 /* some bookkeeping */
92 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
93                                   size_t size)
94 {
95         spin_lock(&dev_priv->mm.object_stat_lock);
96         dev_priv->mm.object_count++;
97         dev_priv->mm.object_memory += size;
98         spin_unlock(&dev_priv->mm.object_stat_lock);
99 }
100
101 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
102                                      size_t size)
103 {
104         spin_lock(&dev_priv->mm.object_stat_lock);
105         dev_priv->mm.object_count--;
106         dev_priv->mm.object_memory -= size;
107         spin_unlock(&dev_priv->mm.object_stat_lock);
108 }
109
110 static int
111 i915_gem_wait_for_error(struct i915_gpu_error *error)
112 {
113         int ret;
114
115 #define EXIT_COND (!i915_reset_in_progress(error) || \
116                    i915_terminally_wedged(error))
117         if (EXIT_COND)
118                 return 0;
119
120         /*
121          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
122          * userspace. If it takes that long something really bad is going on and
123          * we should simply try to bail out and fail as gracefully as possible.
124          */
125         ret = wait_event_interruptible_timeout(error->reset_queue,
126                                                EXIT_COND,
127                                                10*HZ);
128         if (ret == 0) {
129                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
130                 return -EIO;
131         } else if (ret < 0) {
132                 return ret;
133         }
134 #undef EXIT_COND
135
136         return 0;
137 }
138
139 int i915_mutex_lock_interruptible(struct drm_device *dev)
140 {
141         struct drm_i915_private *dev_priv = dev->dev_private;
142         int ret;
143
144         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
145         if (ret)
146                 return ret;
147
148         ret = mutex_lock_interruptible(&dev->struct_mutex);
149         if (ret)
150                 return ret;
151
152         WARN_ON(i915_verify_lists(dev));
153         return 0;
154 }
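/*
 * Illustrative sketch (not part of the driver): the GEM ioctl entry points
 * in this file take the interruptible lock via the helper above and follow
 * the pattern below (cf. i915_gem_pread_ioctl() further down). The ioctl
 * body itself is hypothetical.
 *
 *	ret = i915_mutex_lock_interruptible(dev);
 *	if (ret)
 *		return ret;
 *
 *	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
 *	if (&obj->base == NULL) {
 *		ret = -ENOENT;
 *		goto unlock;
 *	}
 *
 *	... operate on obj ...
 *
 *	drm_gem_object_unreference(&obj->base);
 * unlock:
 *	mutex_unlock(&dev->struct_mutex);
 *	return ret;
 */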
155
156 static inline bool
157 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
158 {
159         return i915_gem_obj_bound_any(obj) && !obj->active;
160 }
161
162 int
163 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
164                             struct drm_file *file)
165 {
166         struct drm_i915_private *dev_priv = dev->dev_private;
167         struct drm_i915_gem_get_aperture *args = data;
168         struct drm_i915_gem_object *obj;
169         size_t pinned;
170
171         pinned = 0;
172         mutex_lock(&dev->struct_mutex);
173         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
174                 if (i915_gem_obj_is_pinned(obj))
175                         pinned += i915_gem_obj_ggtt_size(obj);
176         mutex_unlock(&dev->struct_mutex);
177
178         args->aper_size = dev_priv->gtt.base.total;
179         args->aper_available_size = args->aper_size - pinned;
180
181         return 0;
182 }
183
184 static int
185 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
186 {
187         struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
188         char *vaddr = obj->phys_handle->vaddr;
189         struct sg_table *st;
190         struct scatterlist *sg;
191         int i;
192
193         if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
194                 return -EINVAL;
195
196         for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
197                 struct page *page;
198                 char *src;
199
200                 page = shmem_read_mapping_page(mapping, i);
201                 if (IS_ERR(page))
202                         return PTR_ERR(page);
203
204                 src = kmap_atomic(page);
205                 memcpy(vaddr, src, PAGE_SIZE);
206                 drm_clflush_virt_range(vaddr, PAGE_SIZE);
207                 kunmap_atomic(src);
208
209                 page_cache_release(page);
210                 vaddr += PAGE_SIZE;
211         }
212
213         i915_gem_chipset_flush(obj->base.dev);
214
215         st = kmalloc(sizeof(*st), GFP_KERNEL);
216         if (st == NULL)
217                 return -ENOMEM;
218
219         if (sg_alloc_table(st, 1, GFP_KERNEL)) {
220                 kfree(st);
221                 return -ENOMEM;
222         }
223
224         sg = st->sgl;
225         sg->offset = 0;
226         sg->length = obj->base.size;
227
228         sg_dma_address(sg) = obj->phys_handle->busaddr;
229         sg_dma_len(sg) = obj->base.size;
230
231         obj->pages = st;
232         obj->has_dma_mapping = true;
233         return 0;
234 }
235
236 static void
237 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
238 {
239         int ret;
240
241         BUG_ON(obj->madv == __I915_MADV_PURGED);
242
243         ret = i915_gem_object_set_to_cpu_domain(obj, true);
244         if (ret) {
245                 /* In the event of a disaster, abandon all caches and
246                  * hope for the best.
247                  */
248                 WARN_ON(ret != -EIO);
249                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
250         }
251
252         if (obj->madv == I915_MADV_DONTNEED)
253                 obj->dirty = 0;
254
255         if (obj->dirty) {
256                 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
257                 char *vaddr = obj->phys_handle->vaddr;
258                 int i;
259
260                 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
261                         struct page *page;
262                         char *dst;
263
264                         page = shmem_read_mapping_page(mapping, i);
265                         if (IS_ERR(page))
266                                 continue;
267
268                         dst = kmap_atomic(page);
269                         drm_clflush_virt_range(vaddr, PAGE_SIZE);
270                         memcpy(dst, vaddr, PAGE_SIZE);
271                         kunmap_atomic(dst);
272
273                         set_page_dirty(page);
274                         if (obj->madv == I915_MADV_WILLNEED)
275                                 mark_page_accessed(page);
276                         page_cache_release(page);
277                         vaddr += PAGE_SIZE;
278                 }
279                 obj->dirty = 0;
280         }
281
282         sg_free_table(obj->pages);
283         kfree(obj->pages);
284
285         obj->has_dma_mapping = false;
286 }
287
288 static void
289 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
290 {
291         drm_pci_free(obj->base.dev, obj->phys_handle);
292 }
293
294 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
295         .get_pages = i915_gem_object_get_pages_phys,
296         .put_pages = i915_gem_object_put_pages_phys,
297         .release = i915_gem_object_release_phys,
298 };
299
300 static int
301 drop_pages(struct drm_i915_gem_object *obj)
302 {
303         struct i915_vma *vma, *next;
304         int ret;
305
306         drm_gem_object_reference(&obj->base);
307         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
308                 if (i915_vma_unbind(vma))
309                         break;
310
311         ret = i915_gem_object_put_pages(obj);
312         drm_gem_object_unreference(&obj->base);
313
314         return ret;
315 }
316
317 int
318 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
319                             int align)
320 {
321         drm_dma_handle_t *phys;
322         int ret;
323
324         if (obj->phys_handle) {
325                 if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
326                         return -EBUSY;
327
328                 return 0;
329         }
330
331         if (obj->madv != I915_MADV_WILLNEED)
332                 return -EFAULT;
333
334         if (obj->base.filp == NULL)
335                 return -EINVAL;
336
337         ret = drop_pages(obj);
338         if (ret)
339                 return ret;
340
341         /* create a new object */
342         phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
343         if (!phys)
344                 return -ENOMEM;
345
346         obj->phys_handle = phys;
347         obj->ops = &i915_gem_phys_ops;
348
349         return i915_gem_object_get_pages(obj);
350 }
351
352 static int
353 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
354                      struct drm_i915_gem_pwrite *args,
355                      struct drm_file *file_priv)
356 {
357         struct drm_device *dev = obj->base.dev;
358         void *vaddr = obj->phys_handle->vaddr + args->offset;
359         char __user *user_data = to_user_ptr(args->data_ptr);
360         int ret;
361
362         /* We manually control the domain here and pretend that it
363          * remains coherent i.e. in the GTT domain, like shmem_pwrite.
364          */
365         ret = i915_gem_object_wait_rendering(obj, false);
366         if (ret)
367                 return ret;
368
369         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
370                 unsigned long unwritten;
371
372                 /* The physical object once assigned is fixed for the lifetime
373                  * of the obj, so we can safely drop the lock and continue
374                  * to access vaddr.
375                  */
376                 mutex_unlock(&dev->struct_mutex);
377                 unwritten = copy_from_user(vaddr, user_data, args->size);
378                 mutex_lock(&dev->struct_mutex);
379                 if (unwritten)
380                         return -EFAULT;
381         }
382
383         drm_clflush_virt_range(vaddr, args->size);
384         i915_gem_chipset_flush(dev);
385         return 0;
386 }
387
388 void *i915_gem_object_alloc(struct drm_device *dev)
389 {
390         struct drm_i915_private *dev_priv = dev->dev_private;
391         return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
392 }
393
394 void i915_gem_object_free(struct drm_i915_gem_object *obj)
395 {
396         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
397         kmem_cache_free(dev_priv->slab, obj);
398 }
399
400 static int
401 i915_gem_create(struct drm_file *file,
402                 struct drm_device *dev,
403                 uint64_t size,
404                 bool dumb,
405                 uint32_t *handle_p)
406 {
407         struct drm_i915_gem_object *obj;
408         int ret;
409         u32 handle;
410
411         size = roundup(size, PAGE_SIZE);
412         if (size == 0)
413                 return -EINVAL;
414
415         /* Allocate the new object */
416         obj = i915_gem_alloc_object(dev, size);
417         if (obj == NULL)
418                 return -ENOMEM;
419
420         obj->base.dumb = dumb;
421         ret = drm_gem_handle_create(file, &obj->base, &handle);
422         /* drop reference from allocate - handle holds it now */
423         drm_gem_object_unreference_unlocked(&obj->base);
424         if (ret)
425                 return ret;
426
427         *handle_p = handle;
428         return 0;
429 }
430
431 int
432 i915_gem_dumb_create(struct drm_file *file,
433                      struct drm_device *dev,
434                      struct drm_mode_create_dumb *args)
435 {
436         /* have to work out size/pitch and return them */
437         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
438         args->size = args->pitch * args->height;
439         return i915_gem_create(file, dev,
440                                args->size, true, &args->handle);
441 }
442
443 /**
444  * Creates a new mm object and returns a handle to it.
445  */
446 int
447 i915_gem_create_ioctl(struct drm_device *dev, void *data,
448                       struct drm_file *file)
449 {
450         struct drm_i915_gem_create *args = data;
451
452         return i915_gem_create(file, dev,
453                                args->size, false, &args->handle);
454 }
455
456 static inline int
457 __copy_to_user_swizzled(char __user *cpu_vaddr,
458                         const char *gpu_vaddr, int gpu_offset,
459                         int length)
460 {
461         int ret, cpu_offset = 0;
462
463         while (length > 0) {
464                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
465                 int this_length = min(cacheline_end - gpu_offset, length);
466                 int swizzled_gpu_offset = gpu_offset ^ 64;
467
468                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
469                                      gpu_vaddr + swizzled_gpu_offset,
470                                      this_length);
471                 if (ret)
472                         return ret + length;
473
474                 cpu_offset += this_length;
475                 gpu_offset += this_length;
476                 length -= this_length;
477         }
478
479         return 0;
480 }
481
482 static inline int
483 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
484                           const char __user *cpu_vaddr,
485                           int length)
486 {
487         int ret, cpu_offset = 0;
488
489         while (length > 0) {
490                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
491                 int this_length = min(cacheline_end - gpu_offset, length);
492                 int swizzled_gpu_offset = gpu_offset ^ 64;
493
494                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
495                                        cpu_vaddr + cpu_offset,
496                                        this_length);
497                 if (ret)
498                         return ret + length;
499
500                 cpu_offset += this_length;
501                 gpu_offset += this_length;
502                 length -= this_length;
503         }
504
505         return 0;
506 }
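/*
 * Worked example (illustrative) of the bit-17 swizzle handled by the two
 * helpers above: the callers enable swizzling only when bit 17 of the
 * page's physical address is set, and "gpu_offset ^ 64" then exchanges the
 * two 64-byte halves of each 128-byte block within the page:
 *
 *	logical gpu_offset 0x000..0x03f <-> page offset 0x040..0x07f
 *	logical gpu_offset 0x040..0x07f <-> page offset 0x000..0x03f
 *
 * Each copy is clamped to a 64-byte cacheline boundary, so the XOR always
 * maps a contiguous source range.
 */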
507
508 /*
509  * Pins the specified object's pages and synchronizes the object with
510  * GPU accesses. Sets needs_clflush to non-zero if the caller should
511  * flush the object from the CPU cache.
512  */
513 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
514                                     int *needs_clflush)
515 {
516         int ret;
517
518         *needs_clflush = 0;
519
520         if (!obj->base.filp)
521                 return -EINVAL;
522
523         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
524                 /* If we're not in the cpu read domain, set ourselves into the gtt
525                  * read domain and manually flush cachelines (if required). This
526                  * optimizes for the case when the gpu will dirty the data
527                  * anyway again before the next pread happens. */
528                 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
529                                                         obj->cache_level);
530                 ret = i915_gem_object_wait_rendering(obj, true);
531                 if (ret)
532                         return ret;
533
534                 i915_gem_object_retire(obj);
535         }
536
537         ret = i915_gem_object_get_pages(obj);
538         if (ret)
539                 return ret;
540
541         i915_gem_object_pin_pages(obj);
542
543         return ret;
544 }
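/*
 * Illustrative usage (mirrors i915_gem_shmem_pread() below): callers pair
 * this helper with i915_gem_object_unpin_pages() once they are done with
 * the backing pages, e.g.
 *
 *	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
 *	if (ret)
 *		return ret;
 *
 *	... copy out of obj->pages, clflushing first if needs_clflush ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */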
545
546 /* Per-page copy function for the shmem pread fastpath.
547  * Flushes invalid cachelines before reading the target if
548  * needs_clflush is set. */
549 static int
550 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
551                  char __user *user_data,
552                  bool page_do_bit17_swizzling, bool needs_clflush)
553 {
554         char *vaddr;
555         int ret;
556
557         if (unlikely(page_do_bit17_swizzling))
558                 return -EINVAL;
559
560         vaddr = kmap_atomic(page);
561         if (needs_clflush)
562                 drm_clflush_virt_range(vaddr + shmem_page_offset,
563                                        page_length);
564         ret = __copy_to_user_inatomic(user_data,
565                                       vaddr + shmem_page_offset,
566                                       page_length);
567         kunmap_atomic(vaddr);
568
569         return ret ? -EFAULT : 0;
570 }
571
572 static void
573 shmem_clflush_swizzled_range(char *addr, unsigned long length,
574                              bool swizzled)
575 {
576         if (unlikely(swizzled)) {
577                 unsigned long start = (unsigned long) addr;
578                 unsigned long end = (unsigned long) addr + length;
579
580                 /* For swizzling simply ensure that we always flush both
581                  * channels. Lame, but simple and it works. Swizzled
582                  * pwrite/pread is far from a hotpath - current userspace
583                  * doesn't use it at all. */
584                 start = round_down(start, 128);
585                 end = round_up(end, 128);
586
587                 drm_clflush_virt_range((void *)start, end - start);
588         } else {
589                 drm_clflush_virt_range(addr, length);
590         }
591
592 }
593
594 /* Only difference to the fast-path function is that this can handle bit17
595  * and uses non-atomic copy and kmap functions. */
596 static int
597 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
598                  char __user *user_data,
599                  bool page_do_bit17_swizzling, bool needs_clflush)
600 {
601         char *vaddr;
602         int ret;
603
604         vaddr = kmap(page);
605         if (needs_clflush)
606                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
607                                              page_length,
608                                              page_do_bit17_swizzling);
609
610         if (page_do_bit17_swizzling)
611                 ret = __copy_to_user_swizzled(user_data,
612                                               vaddr, shmem_page_offset,
613                                               page_length);
614         else
615                 ret = __copy_to_user(user_data,
616                                      vaddr + shmem_page_offset,
617                                      page_length);
618         kunmap(page);
619
620         return ret ? -EFAULT : 0;
621 }
622
623 static int
624 i915_gem_shmem_pread(struct drm_device *dev,
625                      struct drm_i915_gem_object *obj,
626                      struct drm_i915_gem_pread *args,
627                      struct drm_file *file)
628 {
629         char __user *user_data;
630         ssize_t remain;
631         loff_t offset;
632         int shmem_page_offset, page_length, ret = 0;
633         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
634         int prefaulted = 0;
635         int needs_clflush = 0;
636         struct sg_page_iter sg_iter;
637
638         user_data = to_user_ptr(args->data_ptr);
639         remain = args->size;
640
641         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
642
643         ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
644         if (ret)
645                 return ret;
646
647         offset = args->offset;
648
649         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
650                          offset >> PAGE_SHIFT) {
651                 struct page *page = sg_page_iter_page(&sg_iter);
652
653                 if (remain <= 0)
654                         break;
655
656                 /* Operation in this page
657                  *
658                  * shmem_page_offset = offset within page in shmem file
659                  * page_length = bytes to copy for this page
660                  */
661                 shmem_page_offset = offset_in_page(offset);
662                 page_length = remain;
663                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
664                         page_length = PAGE_SIZE - shmem_page_offset;
665
666                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
667                         (page_to_phys(page) & (1 << 17)) != 0;
668
669                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
670                                        user_data, page_do_bit17_swizzling,
671                                        needs_clflush);
672                 if (ret == 0)
673                         goto next_page;
674
675                 mutex_unlock(&dev->struct_mutex);
676
677                 if (likely(!i915.prefault_disable) && !prefaulted) {
678                         ret = fault_in_multipages_writeable(user_data, remain);
679                         /* Userspace is tricking us, but we've already clobbered
680                          * its pages with the prefault and promised to write the
681                          * data up to the first fault. Hence ignore any errors
682                          * and just continue. */
683                         (void)ret;
684                         prefaulted = 1;
685                 }
686
687                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
688                                        user_data, page_do_bit17_swizzling,
689                                        needs_clflush);
690
691                 mutex_lock(&dev->struct_mutex);
692
693                 if (ret)
694                         goto out;
695
696 next_page:
697                 remain -= page_length;
698                 user_data += page_length;
699                 offset += page_length;
700         }
701
702 out:
703         i915_gem_object_unpin_pages(obj);
704
705         return ret;
706 }
707
708 /**
709  * Reads data from the object referenced by handle.
710  *
711  * On error, the contents of *data are undefined.
712  */
713 int
714 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
715                      struct drm_file *file)
716 {
717         struct drm_i915_gem_pread *args = data;
718         struct drm_i915_gem_object *obj;
719         int ret = 0;
720
721         if (args->size == 0)
722                 return 0;
723
724         if (!access_ok(VERIFY_WRITE,
725                        to_user_ptr(args->data_ptr),
726                        args->size))
727                 return -EFAULT;
728
729         ret = i915_mutex_lock_interruptible(dev);
730         if (ret)
731                 return ret;
732
733         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
734         if (&obj->base == NULL) {
735                 ret = -ENOENT;
736                 goto unlock;
737         }
738
739         /* Bounds check source.  */
740         if (args->offset > obj->base.size ||
741             args->size > obj->base.size - args->offset) {
742                 ret = -EINVAL;
743                 goto out;
744         }
745
746         /* prime objects have no backing filp to GEM pread/pwrite
747          * pages from.
748          */
749         if (!obj->base.filp) {
750                 ret = -EINVAL;
751                 goto out;
752         }
753
754         trace_i915_gem_object_pread(obj, args->offset, args->size);
755
756         ret = i915_gem_shmem_pread(dev, obj, args, file);
757
758 out:
759         drm_gem_object_unreference(&obj->base);
760 unlock:
761         mutex_unlock(&dev->struct_mutex);
762         return ret;
763 }
764
765 /* This is the fast write path which cannot handle
766  * page faults in the source data
767  */
768
769 static inline int
770 fast_user_write(struct io_mapping *mapping,
771                 loff_t page_base, int page_offset,
772                 char __user *user_data,
773                 int length)
774 {
775         void __iomem *vaddr_atomic;
776         void *vaddr;
777         unsigned long unwritten;
778
779         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
780         /* We can use the cpu mem copy function because this is X86. */
781         vaddr = (void __force*)vaddr_atomic + page_offset;
782         unwritten = __copy_from_user_inatomic_nocache(vaddr,
783                                                       user_data, length);
784         io_mapping_unmap_atomic(vaddr_atomic);
785         return unwritten;
786 }
787
788 /**
789  * This is the fast pwrite path, where we copy the data directly from the
790  * user into the GTT, uncached.
791  */
792 static int
793 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
794                          struct drm_i915_gem_object *obj,
795                          struct drm_i915_gem_pwrite *args,
796                          struct drm_file *file)
797 {
798         struct drm_i915_private *dev_priv = dev->dev_private;
799         ssize_t remain;
800         loff_t offset, page_base;
801         char __user *user_data;
802         int page_offset, page_length, ret;
803
804         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
805         if (ret)
806                 goto out;
807
808         ret = i915_gem_object_set_to_gtt_domain(obj, true);
809         if (ret)
810                 goto out_unpin;
811
812         ret = i915_gem_object_put_fence(obj);
813         if (ret)
814                 goto out_unpin;
815
816         user_data = to_user_ptr(args->data_ptr);
817         remain = args->size;
818
819         offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
820
821         while (remain > 0) {
822                 /* Operation in this page
823                  *
824                  * page_base = page offset within aperture
825                  * page_offset = offset within page
826                  * page_length = bytes to copy for this page
827                  */
828                 page_base = offset & PAGE_MASK;
829                 page_offset = offset_in_page(offset);
830                 page_length = remain;
831                 if ((page_offset + remain) > PAGE_SIZE)
832                         page_length = PAGE_SIZE - page_offset;
833
834                 /* If we get a fault while copying data, then (presumably) our
835                  * source page isn't available.  Return the error and we'll
836                  * retry in the slow path.
837                  */
838                 if (fast_user_write(dev_priv->gtt.mappable, page_base,
839                                     page_offset, user_data, page_length)) {
840                         ret = -EFAULT;
841                         goto out_unpin;
842                 }
843
844                 remain -= page_length;
845                 user_data += page_length;
846                 offset += page_length;
847         }
848
849 out_unpin:
850         i915_gem_object_ggtt_unpin(obj);
851 out:
852         return ret;
853 }
854
855 /* Per-page copy function for the shmem pwrite fastpath.
856  * Flushes invalid cachelines before writing to the target if
857  * needs_clflush_before is set and flushes out any written cachelines after
858  * writing if needs_clflush is set. */
859 static int
860 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
861                   char __user *user_data,
862                   bool page_do_bit17_swizzling,
863                   bool needs_clflush_before,
864                   bool needs_clflush_after)
865 {
866         char *vaddr;
867         int ret;
868
869         if (unlikely(page_do_bit17_swizzling))
870                 return -EINVAL;
871
872         vaddr = kmap_atomic(page);
873         if (needs_clflush_before)
874                 drm_clflush_virt_range(vaddr + shmem_page_offset,
875                                        page_length);
876         ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
877                                         user_data, page_length);
878         if (needs_clflush_after)
879                 drm_clflush_virt_range(vaddr + shmem_page_offset,
880                                        page_length);
881         kunmap_atomic(vaddr);
882
883         return ret ? -EFAULT : 0;
884 }
885
886 /* Only difference to the fast-path function is that this can handle bit17
887  * and uses non-atomic copy and kmap functions. */
888 static int
889 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
890                   char __user *user_data,
891                   bool page_do_bit17_swizzling,
892                   bool needs_clflush_before,
893                   bool needs_clflush_after)
894 {
895         char *vaddr;
896         int ret;
897
898         vaddr = kmap(page);
899         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
900                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
901                                              page_length,
902                                              page_do_bit17_swizzling);
903         if (page_do_bit17_swizzling)
904                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
905                                                 user_data,
906                                                 page_length);
907         else
908                 ret = __copy_from_user(vaddr + shmem_page_offset,
909                                        user_data,
910                                        page_length);
911         if (needs_clflush_after)
912                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
913                                              page_length,
914                                              page_do_bit17_swizzling);
915         kunmap(page);
916
917         return ret ? -EFAULT : 0;
918 }
919
920 static int
921 i915_gem_shmem_pwrite(struct drm_device *dev,
922                       struct drm_i915_gem_object *obj,
923                       struct drm_i915_gem_pwrite *args,
924                       struct drm_file *file)
925 {
926         ssize_t remain;
927         loff_t offset;
928         char __user *user_data;
929         int shmem_page_offset, page_length, ret = 0;
930         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
931         int hit_slowpath = 0;
932         int needs_clflush_after = 0;
933         int needs_clflush_before = 0;
934         struct sg_page_iter sg_iter;
935
936         user_data = to_user_ptr(args->data_ptr);
937         remain = args->size;
938
939         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
940
941         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
942                 /* If we're not in the cpu write domain, set ourselves into the gtt
943                  * write domain and manually flush cachelines (if required). This
944                  * optimizes for the case when the gpu will use the data
945                  * right away and we therefore have to clflush anyway. */
946                 needs_clflush_after = cpu_write_needs_clflush(obj);
947                 ret = i915_gem_object_wait_rendering(obj, false);
948                 if (ret)
949                         return ret;
950
951                 i915_gem_object_retire(obj);
952         }
953         /* Same trick applies to invalidate partially written cachelines read
954          * before writing. */
955         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
956                 needs_clflush_before =
957                         !cpu_cache_is_coherent(dev, obj->cache_level);
958
959         ret = i915_gem_object_get_pages(obj);
960         if (ret)
961                 return ret;
962
963         i915_gem_object_pin_pages(obj);
964
965         offset = args->offset;
966         obj->dirty = 1;
967
968         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
969                          offset >> PAGE_SHIFT) {
970                 struct page *page = sg_page_iter_page(&sg_iter);
971                 int partial_cacheline_write;
972
973                 if (remain <= 0)
974                         break;
975
976                 /* Operation in this page
977                  *
978                  * shmem_page_offset = offset within page in shmem file
979                  * page_length = bytes to copy for this page
980                  */
981                 shmem_page_offset = offset_in_page(offset);
982
983                 page_length = remain;
984                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
985                         page_length = PAGE_SIZE - shmem_page_offset;
986
987                 /* If we don't overwrite a cacheline completely we need to be
988                  * careful to have up-to-date data by first clflushing. Don't
989                  * overcomplicate things and flush the entire patch. */
990                 partial_cacheline_write = needs_clflush_before &&
991                         ((shmem_page_offset | page_length)
992                                 & (boot_cpu_data.x86_clflush_size - 1));
993
994                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
995                         (page_to_phys(page) & (1 << 17)) != 0;
996
997                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
998                                         user_data, page_do_bit17_swizzling,
999                                         partial_cacheline_write,
1000                                         needs_clflush_after);
1001                 if (ret == 0)
1002                         goto next_page;
1003
1004                 hit_slowpath = 1;
1005                 mutex_unlock(&dev->struct_mutex);
1006                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
1007                                         user_data, page_do_bit17_swizzling,
1008                                         partial_cacheline_write,
1009                                         needs_clflush_after);
1010
1011                 mutex_lock(&dev->struct_mutex);
1012
1013                 if (ret)
1014                         goto out;
1015
1016 next_page:
1017                 remain -= page_length;
1018                 user_data += page_length;
1019                 offset += page_length;
1020         }
1021
1022 out:
1023         i915_gem_object_unpin_pages(obj);
1024
1025         if (hit_slowpath) {
1026                 /*
1027                  * Fixup: Flush cpu caches in case we didn't flush the dirty
1028                  * cachelines in-line while writing and the object moved
1029                  * out of the cpu write domain while we've dropped the lock.
1030                  */
1031                 if (!needs_clflush_after &&
1032                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1033                         if (i915_gem_clflush_object(obj, obj->pin_display))
1034                                 i915_gem_chipset_flush(dev);
1035                 }
1036         }
1037
1038         if (needs_clflush_after)
1039                 i915_gem_chipset_flush(dev);
1040
1041         return ret;
1042 }
1043
1044 /**
1045  * Writes data to the object referenced by handle.
1046  *
1047  * On error, the contents of the buffer that were to be modified are undefined.
1048  */
1049 int
1050 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1051                       struct drm_file *file)
1052 {
1053         struct drm_i915_gem_pwrite *args = data;
1054         struct drm_i915_gem_object *obj;
1055         int ret;
1056
1057         if (args->size == 0)
1058                 return 0;
1059
1060         if (!access_ok(VERIFY_READ,
1061                        to_user_ptr(args->data_ptr),
1062                        args->size))
1063                 return -EFAULT;
1064
1065         if (likely(!i915.prefault_disable)) {
1066                 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
1067                                                    args->size);
1068                 if (ret)
1069                         return -EFAULT;
1070         }
1071
1072         ret = i915_mutex_lock_interruptible(dev);
1073         if (ret)
1074                 return ret;
1075
1076         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1077         if (&obj->base == NULL) {
1078                 ret = -ENOENT;
1079                 goto unlock;
1080         }
1081
1082         /* Bounds check destination. */
1083         if (args->offset > obj->base.size ||
1084             args->size > obj->base.size - args->offset) {
1085                 ret = -EINVAL;
1086                 goto out;
1087         }
1088
1089         /* prime objects have no backing filp to GEM pread/pwrite
1090          * pages from.
1091          */
1092         if (!obj->base.filp) {
1093                 ret = -EINVAL;
1094                 goto out;
1095         }
1096
1097         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1098
1099         ret = -EFAULT;
1100         /* We can only do the GTT pwrite on untiled buffers, as otherwise
1101          * it would end up going through the fenced access, and we'll get
1102          * different detiling behavior between reading and writing.
1103          * pread/pwrite currently are reading and writing from the CPU
1104          * perspective, requiring manual detiling by the client.
1105          */
1106         if (obj->tiling_mode == I915_TILING_NONE &&
1107             obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
1108             cpu_write_needs_clflush(obj)) {
1109                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1110                 /* Note that the gtt paths might fail with non-page-backed user
1111                  * pointers (e.g. gtt mappings when moving data between
1112                  * textures). Fall back to the shmem path in that case. */
1113         }
1114
1115         if (ret == -EFAULT || ret == -ENOSPC) {
1116                 if (obj->phys_handle)
1117                         ret = i915_gem_phys_pwrite(obj, args, file);
1118                 else
1119                         ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1120         }
1121
1122 out:
1123         drm_gem_object_unreference(&obj->base);
1124 unlock:
1125         mutex_unlock(&dev->struct_mutex);
1126         return ret;
1127 }
1128
1129 int
1130 i915_gem_check_wedge(struct i915_gpu_error *error,
1131                      bool interruptible)
1132 {
1133         if (i915_reset_in_progress(error)) {
1134                 /* Non-interruptible callers can't handle -EAGAIN, hence return
1135                  * -EIO unconditionally for these. */
1136                 if (!interruptible)
1137                         return -EIO;
1138
1139                 /* Recovery complete, but the reset failed ... */
1140                 if (i915_terminally_wedged(error))
1141                         return -EIO;
1142
1143                 /*
1144                  * Check if GPU Reset is in progress - we need intel_ring_begin
1145                  * to work properly to reinit the hw state while the gpu is
1146                  * still marked as reset-in-progress. Handle this with a flag.
1147                  */
1148                 if (!error->reload_in_reset)
1149                         return -EAGAIN;
1150         }
1151
1152         return 0;
1153 }
1154
1155 /*
1156  * Compare seqno against outstanding lazy request. Emit a request if they are
1157  * equal.
1158  */
1159 int
1160 i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
1161 {
1162         int ret;
1163
1164         BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
1165
1166         ret = 0;
1167         if (seqno == ring->outstanding_lazy_seqno)
1168                 ret = i915_add_request(ring, NULL);
1169
1170         return ret;
1171 }
1172
1173 static void fake_irq(unsigned long data)
1174 {
1175         wake_up_process((struct task_struct *)data);
1176 }
1177
1178 static bool missed_irq(struct drm_i915_private *dev_priv,
1179                        struct intel_engine_cs *ring)
1180 {
1181         return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
1182 }
1183
1184 static bool can_wait_boost(struct drm_i915_file_private *file_priv)
1185 {
1186         if (file_priv == NULL)
1187                 return true;
1188
1189         return !atomic_xchg(&file_priv->rps_wait_boost, true);
1190 }
1191
1192 /**
1193  * __i915_wait_seqno - wait until execution of seqno has finished
1194  * @ring: the ring expected to report seqno
1195  * @seqno: seqno to wait for
1196  * @reset_counter: reset sequence associated with the given seqno
1197  * @interruptible: do an interruptible wait (normally yes)
1198  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1199  *
1200  * Note: It is of utmost importance that the passed-in seqno and reset_counter
1201  * values have been read by the caller in an SMP-safe manner. Where read-side
1202  * locks are involved, it is sufficient to read the reset_counter before
1203  * unlocking the lock that protects the seqno. For lockless tricks, the
1204  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1205  * inserted.
1206  *
1207  * Returns 0 if the seqno was found within the allotted time. Otherwise returns
1208  * the errno, with the remaining time filled in the timeout argument.
1209  */
1210 int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
1211                         unsigned reset_counter,
1212                         bool interruptible,
1213                         s64 *timeout,
1214                         struct drm_i915_file_private *file_priv)
1215 {
1216         struct drm_device *dev = ring->dev;
1217         struct drm_i915_private *dev_priv = dev->dev_private;
1218         const bool irq_test_in_progress =
1219                 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
1220         DEFINE_WAIT(wait);
1221         unsigned long timeout_expire;
1222         s64 before, now;
1223         int ret;
1224
1225         WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
1226
1227         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1228                 return 0;
1229
1230         timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
1231
1232         if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
1233                 gen6_rps_boost(dev_priv);
1234                 if (file_priv)
1235                         mod_delayed_work(dev_priv->wq,
1236                                          &file_priv->mm.idle_work,
1237                                          msecs_to_jiffies(100));
1238         }
1239
1240         if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
1241                 return -ENODEV;
1242
1243         /* Record current time in case interrupted by signal, or wedged */
1244         trace_i915_gem_request_wait_begin(ring, seqno);
1245         before = ktime_get_raw_ns();
1246         for (;;) {
1247                 struct timer_list timer;
1248
1249                 prepare_to_wait(&ring->irq_queue, &wait,
1250                                 interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
1251
1252                 /* We need to check whether any gpu reset happened in between
1253                  * the caller grabbing the seqno and now ... */
1254                 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
1255                         /* ... but upgrade the -EAGAIN to an -EIO if the gpu
1256                          * is truly gone. */
1257                         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1258                         if (ret == 0)
1259                                 ret = -EAGAIN;
1260                         break;
1261                 }
1262
1263                 if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
1264                         ret = 0;
1265                         break;
1266                 }
1267
1268                 if (interruptible && signal_pending(current)) {
1269                         ret = -ERESTARTSYS;
1270                         break;
1271                 }
1272
1273                 if (timeout && time_after_eq(jiffies, timeout_expire)) {
1274                         ret = -ETIME;
1275                         break;
1276                 }
1277
1278                 timer.function = NULL;
1279                 if (timeout || missed_irq(dev_priv, ring)) {
1280                         unsigned long expire;
1281
1282                         setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
1283                         expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
1284                         mod_timer(&timer, expire);
1285                 }
1286
1287                 io_schedule();
1288
1289                 if (timer.function) {
1290                         del_singleshot_timer_sync(&timer);
1291                         destroy_timer_on_stack(&timer);
1292                 }
1293         }
1294         now = ktime_get_raw_ns();
1295         trace_i915_gem_request_wait_end(ring, seqno);
1296
1297         if (!irq_test_in_progress)
1298                 ring->irq_put(ring);
1299
1300         finish_wait(&ring->irq_queue, &wait);
1301
1302         if (timeout) {
1303                 s64 tres = *timeout - (now - before);
1304
1305                 *timeout = tres < 0 ? 0 : tres;
1306         }
1307
1308         return ret;
1309 }
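/*
 * Illustrative sketch of the locking rule documented above (mirrors
 * i915_gem_object_wait_rendering__nonblocking() below): sample the
 * reset_counter while still holding the lock that protects the seqno,
 * then drop it for the actual wait.
 *
 *	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 *	mutex_unlock(&dev->struct_mutex);
 *	ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
 *				file_priv);
 *	mutex_lock(&dev->struct_mutex);
 */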
1310
1311 /**
1312  * Waits for a sequence number to be signaled, and cleans up the
1313  * request and object lists appropriately for that event.
1314  */
1315 int
1316 i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
1317 {
1318         struct drm_device *dev = ring->dev;
1319         struct drm_i915_private *dev_priv = dev->dev_private;
1320         bool interruptible = dev_priv->mm.interruptible;
1321         unsigned reset_counter;
1322         int ret;
1323
1324         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1325         BUG_ON(seqno == 0);
1326
1327         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1328         if (ret)
1329                 return ret;
1330
1331         ret = i915_gem_check_olr(ring, seqno);
1332         if (ret)
1333                 return ret;
1334
1335         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1336         return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
1337                                  NULL, NULL);
1338 }
1339
1340 static int
1341 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
1342 {
1343         if (!obj->active)
1344                 return 0;
1345
1346         /* Manually manage the write flush as we may have not yet
1347          * retired the buffer.
1348          *
1349          * Note that the last_write_seqno is always the earlier of
1350          * the two (read/write) seqnos, so if we have successfully waited,
1351          * we know we have passed the last write.
1352          */
1353         obj->last_write_seqno = 0;
1354
1355         return 0;
1356 }
1357
1358 /**
1359  * Ensures that all rendering to the object has completed and the object is
1360  * safe to unbind from the GTT or access from the CPU.
1361  */
1362 static __must_check int
1363 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1364                                bool readonly)
1365 {
1366         struct intel_engine_cs *ring = obj->ring;
1367         u32 seqno;
1368         int ret;
1369
1370         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1371         if (seqno == 0)
1372                 return 0;
1373
1374         ret = i915_wait_seqno(ring, seqno);
1375         if (ret)
1376                 return ret;
1377
1378         return i915_gem_object_wait_rendering__tail(obj);
1379 }
1380
1381 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1382  * as the object state may change during this call.
1383  */
1384 static __must_check int
1385 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1386                                             struct drm_i915_file_private *file_priv,
1387                                             bool readonly)
1388 {
1389         struct drm_device *dev = obj->base.dev;
1390         struct drm_i915_private *dev_priv = dev->dev_private;
1391         struct intel_engine_cs *ring = obj->ring;
1392         unsigned reset_counter;
1393         u32 seqno;
1394         int ret;
1395
1396         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1397         BUG_ON(!dev_priv->mm.interruptible);
1398
1399         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1400         if (seqno == 0)
1401                 return 0;
1402
1403         ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1404         if (ret)
1405                 return ret;
1406
1407         ret = i915_gem_check_olr(ring, seqno);
1408         if (ret)
1409                 return ret;
1410
1411         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1412         mutex_unlock(&dev->struct_mutex);
1413         ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
1414                                 file_priv);
1415         mutex_lock(&dev->struct_mutex);
1416         if (ret)
1417                 return ret;
1418
1419         return i915_gem_object_wait_rendering__tail(obj);
1420 }
1421
1422 /**
1423  * Called when user space prepares to use an object with the CPU, either
1424  * through the mmap ioctl's mapping or a GTT mapping.
1425  */
1426 int
1427 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1428                           struct drm_file *file)
1429 {
1430         struct drm_i915_gem_set_domain *args = data;
1431         struct drm_i915_gem_object *obj;
1432         uint32_t read_domains = args->read_domains;
1433         uint32_t write_domain = args->write_domain;
1434         int ret;
1435
1436         /* Only handle setting domains to types used by the CPU. */
1437         if (write_domain & I915_GEM_GPU_DOMAINS)
1438                 return -EINVAL;
1439
1440         if (read_domains & I915_GEM_GPU_DOMAINS)
1441                 return -EINVAL;
1442
1443         /* Having something in the write domain implies it's in the read
1444          * domain, and only that read domain.  Enforce that in the request.
1445          */
1446         if (write_domain != 0 && read_domains != write_domain)
1447                 return -EINVAL;
1448
1449         ret = i915_mutex_lock_interruptible(dev);
1450         if (ret)
1451                 return ret;
1452
1453         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1454         if (&obj->base == NULL) {
1455                 ret = -ENOENT;
1456                 goto unlock;
1457         }
1458
1459         /* Try to flush the object off the GPU without holding the lock.
1460          * We will repeat the flush holding the lock in the normal manner
1461          * to catch cases where we are gazumped.
1462          */
1463         ret = i915_gem_object_wait_rendering__nonblocking(obj,
1464                                                           file->driver_priv,
1465                                                           !write_domain);
1466         if (ret)
1467                 goto unref;
1468
1469         if (read_domains & I915_GEM_DOMAIN_GTT) {
1470                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1471
1472                 /* Silently promote "you're not bound, there was nothing to do"
1473                  * to success, since the client was just asking us to
1474                  * make sure everything was done.
1475                  */
1476                 if (ret == -EINVAL)
1477                         ret = 0;
1478         } else {
1479                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1480         }
1481
1482 unref:
1483         drm_gem_object_unreference(&obj->base);
1484 unlock:
1485         mutex_unlock(&dev->struct_mutex);
1486         return ret;
1487 }
1488
1489 /**
1490  * Called when user space has done writes to this buffer
1491  */
1492 int
1493 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1494                          struct drm_file *file)
1495 {
1496         struct drm_i915_gem_sw_finish *args = data;
1497         struct drm_i915_gem_object *obj;
1498         int ret = 0;
1499
1500         ret = i915_mutex_lock_interruptible(dev);
1501         if (ret)
1502                 return ret;
1503
1504         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1505         if (&obj->base == NULL) {
1506                 ret = -ENOENT;
1507                 goto unlock;
1508         }
1509
1510         /* Pinned buffers may be scanout, so flush the cache */
1511         if (obj->pin_display)
1512                 i915_gem_object_flush_cpu_write_domain(obj, true);
1513
1514         drm_gem_object_unreference(&obj->base);
1515 unlock:
1516         mutex_unlock(&dev->struct_mutex);
1517         return ret;
1518 }
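
/*
 * Illustrative pairing with the ioctl above (a hedged sketch, not part of
 * the driver): userspace typically brackets CPU writes through a cpu mmap
 * with set_domain(CPU) before and sw_finish afterwards, so that a pinned
 * scanout buffer gets its cache flushed here.
 *
 *	struct drm_i915_gem_sw_finish fin = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &fin);
 */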
1519
1520 /**
1521  * Maps the contents of an object, returning the address it is mapped
1522  * into.
1523  *
1524  * While the mapping holds a reference on the contents of the object, it doesn't
1525  * imply a ref on the object itself.
1526  *
1527  * IMPORTANT:
1528  *
1529  * DRM driver writers who look at this function as an example for how to do GEM
1530  * mmap support, please don't implement mmap support like this. The modern way
1531  * to implement DRM mmap support is with an mmap offset ioctl (like
1532  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1533  * That way debug tooling like valgrind will understand what's going on; hiding
1534  * the mmap call in a driver-private ioctl will break that. The i915 driver only
1535  * does cpu mmaps this way because we didn't know better.
1536  */
1537 int
1538 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1539                     struct drm_file *file)
1540 {
1541         struct drm_i915_gem_mmap *args = data;
1542         struct drm_gem_object *obj;
1543         unsigned long addr;
1544
1545         obj = drm_gem_object_lookup(dev, file, args->handle);
1546         if (obj == NULL)
1547                 return -ENOENT;
1548
1549         /* prime objects have no backing filp to GEM mmap
1550          * pages from.
1551          */
1552         if (!obj->filp) {
1553                 drm_gem_object_unreference_unlocked(obj);
1554                 return -EINVAL;
1555         }
1556
1557         addr = vm_mmap(obj->filp, 0, args->size,
1558                        PROT_READ | PROT_WRITE, MAP_SHARED,
1559                        args->offset);
1560         drm_gem_object_unreference_unlocked(obj);
1561         if (IS_ERR((void *)addr))
1562                 return addr;
1563
1564         args->addr_ptr = (uint64_t) addr;
1565
1566         return 0;
1567 }
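
/*
 * Illustrative userspace usage (a hedged sketch, not part of the driver;
 * fd, handle and size are assumed from earlier setup): despite the warning
 * above, legacy callers drive this ioctl roughly as follows and then write
 * through the returned CPU pointer.
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.size = size,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;
 */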
1568
1569 /**
1570  * i915_gem_fault - fault a page into the GTT
1571  * @vma: VMA in question
1572  * @vmf: fault info
1573  *
1574  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1575  * from userspace.  The fault handler takes care of binding the object to
1576  * the GTT (if needed), allocating and programming a fence register (again,
1577  * only if needed based on whether the old reg is still valid or the object
1578  * is tiled) and inserting a new PTE into the faulting process.
1579  *
1580  * Note that the faulting process may involve evicting existing objects
1581  * from the GTT and/or fence registers to make room.  So performance may
1582  * suffer if the GTT working set is large or there are few fence registers
1583  * left.
1584  */
1585 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1586 {
1587         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1588         struct drm_device *dev = obj->base.dev;
1589         struct drm_i915_private *dev_priv = dev->dev_private;
1590         pgoff_t page_offset;
1591         unsigned long pfn;
1592         int ret = 0;
1593         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1594
1595         intel_runtime_pm_get(dev_priv);
1596
1597         /* We don't use vmf->pgoff since that has the fake offset */
1598         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1599                 PAGE_SHIFT;
1600
1601         ret = i915_mutex_lock_interruptible(dev);
1602         if (ret)
1603                 goto out;
1604
1605         trace_i915_gem_object_fault(obj, page_offset, true, write);
1606
1607         /* Try to flush the object off the GPU first without holding the lock.
1608          * Upon reacquiring the lock, we will perform our sanity checks and then
1609          * repeat the flush holding the lock in the normal manner to catch cases
1610          * where we are gazumped.
1611          */
1612         ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1613         if (ret)
1614                 goto unlock;
1615
1616         /* Access to snoopable pages through the GTT is incoherent. */
1617         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1618                 ret = -EFAULT;
1619                 goto unlock;
1620         }
1621
1622         /* Now bind it into the GTT if needed */
1623         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
1624         if (ret)
1625                 goto unlock;
1626
1627         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1628         if (ret)
1629                 goto unpin;
1630
1631         ret = i915_gem_object_get_fence(obj);
1632         if (ret)
1633                 goto unpin;
1634
1635         /* Finally, remap it using the new GTT offset */
1636         pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1637         pfn >>= PAGE_SHIFT;
1638
1639         if (!obj->fault_mappable) {
1640                 unsigned long size = min_t(unsigned long,
1641                                            vma->vm_end - vma->vm_start,
1642                                            obj->base.size);
1643                 int i;
1644
1645                 for (i = 0; i < size >> PAGE_SHIFT; i++) {
1646                         ret = vm_insert_pfn(vma,
1647                                             (unsigned long)vma->vm_start + i * PAGE_SIZE,
1648                                             pfn + i);
1649                         if (ret)
1650                                 break;
1651                 }
1652
1653                 obj->fault_mappable = true;
1654         } else
1655                 ret = vm_insert_pfn(vma,
1656                                     (unsigned long)vmf->virtual_address,
1657                                     pfn + page_offset);
1658 unpin:
1659         i915_gem_object_ggtt_unpin(obj);
1660 unlock:
1661         mutex_unlock(&dev->struct_mutex);
1662 out:
1663         switch (ret) {
1664         case -EIO:
1665                 /*
1666                  * We eat errors when the gpu is terminally wedged to avoid
1667                  * userspace unduly crashing (gl has no provisions for mmaps to
1668                  * fail). But any other -EIO isn't ours (e.g. swap in failure)
1669                  * and so needs to be reported.
1670                  */
1671                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1672                         ret = VM_FAULT_SIGBUS;
1673                         break;
1674                 }
1675         case -EAGAIN:
1676                 /*
1677                  * EAGAIN means the gpu is hung and we'll wait for the error
1678                  * handler to reset everything when re-faulting in
1679                  * i915_mutex_lock_interruptible.
1680                  */
1681         case 0:
1682         case -ERESTARTSYS:
1683         case -EINTR:
1684         case -EBUSY:
1685                 /*
1686                  * EBUSY is ok: this just means that another thread
1687                  * already did the job.
1688                  */
1689                 ret = VM_FAULT_NOPAGE;
1690                 break;
1691         case -ENOMEM:
1692                 ret = VM_FAULT_OOM;
1693                 break;
1694         case -ENOSPC:
1695         case -EFAULT:
1696                 ret = VM_FAULT_SIGBUS;
1697                 break;
1698         default:
1699                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1700                 ret = VM_FAULT_SIGBUS;
1701                 break;
1702         }
1703
1704         intel_runtime_pm_put(dev_priv);
1705         return ret;
1706 }
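
/*
 * For reference, a sketch of how this handler is reached (assumed wiring,
 * mirroring the i915_gem_vm_ops table in i915_drv.c): drm_gem_mmap() on the
 * fake offset installs the GEM vm_ops on the VMA, so the first access to the
 * mapping faults into i915_gem_fault() above.
 *
 *	static const struct vm_operations_struct i915_gem_vm_ops = {
 *		.fault = i915_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 */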
1707
1708 /**
1709  * i915_gem_release_mmap - remove physical page mappings
1710  * @obj: obj in question
1711  *
1712  * Preserve the reservation of the mmapping with the DRM core code, but
1713  * relinquish ownership of the pages back to the system.
1714  *
1715  * It is vital that we remove the page mapping if we have mapped a tiled
1716  * object through the GTT and then lose the fence register due to
1717  * resource pressure. Similarly if the object has been moved out of the
1718  * aperture, then the pages mapped into userspace must be revoked. Removing the
1719  * mapping will then trigger a page fault on the next user access, allowing
1720  * fixup by i915_gem_fault().
1721  */
1722 void
1723 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1724 {
1725         if (!obj->fault_mappable)
1726                 return;
1727
1728         drm_vma_node_unmap(&obj->base.vma_node,
1729                            obj->base.dev->anon_inode->i_mapping);
1730         obj->fault_mappable = false;
1731 }
1732
1733 void
1734 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1735 {
1736         struct drm_i915_gem_object *obj;
1737
1738         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1739                 i915_gem_release_mmap(obj);
1740 }
1741
1742 uint32_t
1743 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1744 {
1745         uint32_t gtt_size;
1746
1747         if (INTEL_INFO(dev)->gen >= 4 ||
1748             tiling_mode == I915_TILING_NONE)
1749                 return size;
1750
1751         /* Previous chips need a power-of-two fence region when tiling */
1752         if (INTEL_INFO(dev)->gen == 3)
1753                 gtt_size = 1024*1024;
1754         else
1755                 gtt_size = 512*1024;
1756
1757         while (gtt_size < size)
1758                 gtt_size <<= 1;
1759
1760         return gtt_size;
1761 }
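
/*
 * Worked example of the sizing above (values illustrative): a 300KiB tiled
 * object needs a 1MiB fence region on gen3 (start at 1MiB, already large
 * enough) but only 512KiB on gen2; a 1.5MiB tiled object is rounded up to
 * the next power of two, 2MiB, on both. On gen4+ or for untiled objects the
 * size is returned unchanged.
 */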
1762
1763 /**
1764  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1765  * @obj: object to check
1766  *
1767  * Return the required GTT alignment for an object, taking into account
1768  * potential fence register mapping.
1769  */
1770 uint32_t
1771 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1772                            int tiling_mode, bool fenced)
1773 {
1774         /*
1775          * Minimum alignment is 4k (GTT page size), but might be greater
1776          * if a fence register is needed for the object.
1777          */
1778         if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1779             tiling_mode == I915_TILING_NONE)
1780                 return 4096;
1781
1782         /*
1783          * Previous chips need to be aligned to the size of the smallest
1784          * fence register that can contain the object.
1785          */
1786         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1787 }
1788
1789 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1790 {
1791         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1792         int ret;
1793
1794         if (drm_vma_node_has_offset(&obj->base.vma_node))
1795                 return 0;
1796
1797         dev_priv->mm.shrinker_no_lock_stealing = true;
1798
1799         ret = drm_gem_create_mmap_offset(&obj->base);
1800         if (ret != -ENOSPC)
1801                 goto out;
1802
1803         /* Badly fragmented mmap space? The only way we can recover
1804          * space is by destroying unwanted objects. We can't randomly release
1805          * mmap_offsets as userspace expects them to be persistent for the
1806  * lifetime of the objects. The closest we can do is to release the
1807  * offsets on purgeable objects by truncating them and marking them purged,
1808  * which prevents userspace from ever using those objects again.
1809          */
1810         i915_gem_shrink(dev_priv,
1811                         obj->base.size >> PAGE_SHIFT,
1812                         I915_SHRINK_BOUND |
1813                         I915_SHRINK_UNBOUND |
1814                         I915_SHRINK_PURGEABLE);
1815         ret = drm_gem_create_mmap_offset(&obj->base);
1816         if (ret != -ENOSPC)
1817                 goto out;
1818
1819         i915_gem_shrink_all(dev_priv);
1820         ret = drm_gem_create_mmap_offset(&obj->base);
1821 out:
1822         dev_priv->mm.shrinker_no_lock_stealing = false;
1823
1824         return ret;
1825 }
1826
1827 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1828 {
1829         drm_gem_free_mmap_offset(&obj->base);
1830 }
1831
1832 static int
1833 i915_gem_mmap_gtt(struct drm_file *file,
1834                   struct drm_device *dev,
1835                   uint32_t handle, bool dumb,
1836                   uint64_t *offset)
1837 {
1838         struct drm_i915_private *dev_priv = dev->dev_private;
1839         struct drm_i915_gem_object *obj;
1840         int ret;
1841
1842         ret = i915_mutex_lock_interruptible(dev);
1843         if (ret)
1844                 return ret;
1845
1846         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1847         if (&obj->base == NULL) {
1848                 ret = -ENOENT;
1849                 goto unlock;
1850         }
1851
1852         /*
1853          * We don't allow dumb mmaps on objects created using another
1854          * interface.
1855          */
1856         WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
1857                   "Illegal dumb map of accelerated buffer.\n");
1858
1859         if (obj->base.size > dev_priv->gtt.mappable_end) {
1860                 ret = -E2BIG;
1861                 goto out;
1862         }
1863
1864         if (obj->madv != I915_MADV_WILLNEED) {
1865                 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
1866                 ret = -EFAULT;
1867                 goto out;
1868         }
1869
1870         ret = i915_gem_object_create_mmap_offset(obj);
1871         if (ret)
1872                 goto out;
1873
1874         *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1875
1876 out:
1877         drm_gem_object_unreference(&obj->base);
1878 unlock:
1879         mutex_unlock(&dev->struct_mutex);
1880         return ret;
1881 }
1882
1883 int
1884 i915_gem_dumb_map_offset(struct drm_file *file,
1885                          struct drm_device *dev,
1886                          uint32_t handle,
1887                          uint64_t *offset)
1888 {
1889         return i915_gem_mmap_gtt(file, dev, handle, true, offset);
1890 }
1891
1892 /**
1893  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1894  * @dev: DRM device
1895  * @data: GTT mapping ioctl data
1896  * @file: GEM object info
1897  *
1898  * Simply returns the fake offset to userspace so it can mmap it.
1899  * The mmap call will end up in drm_gem_mmap(), which will set things
1900  * up so we can get faults in the handler above.
1901  *
1902  * The fault handler will take care of binding the object into the GTT
1903  * (since it may have been evicted to make room for something), allocating
1904  * a fence register, and mapping the appropriate aperture address into
1905  * userspace.
1906  */
1907 int
1908 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1909                         struct drm_file *file)
1910 {
1911         struct drm_i915_gem_mmap_gtt *args = data;
1912
1913         return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
1914 }
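
/*
 * Illustrative userspace usage (a hedged sketch, not part of the driver;
 * fd, handle and size assumed from earlier setup): the two-step GTT mapping
 * described above: fetch the fake offset, then mmap the DRM fd at that
 * offset so faults land in i915_gem_fault().
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 */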
1915
1916 static inline int
1917 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1918 {
1919         return obj->madv == I915_MADV_DONTNEED;
1920 }
1921
1922 /* Immediately discard the backing storage */
1923 static void
1924 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1925 {
1926         i915_gem_object_free_mmap_offset(obj);
1927
1928         if (obj->base.filp == NULL)
1929                 return;
1930
1931         /* Our goal here is to return as much of the memory as
1932          * possible back to the system, as we are called from OOM.
1933          * To do this we must instruct the shmfs to drop all of its
1934          * backing pages, *now*.
1935          */
1936         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
1937         obj->madv = __I915_MADV_PURGED;
1938 }
1939
1940 /* Try to discard unwanted pages */
1941 static void
1942 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
1943 {
1944         struct address_space *mapping;
1945
1946         switch (obj->madv) {
1947         case I915_MADV_DONTNEED:
1948                 i915_gem_object_truncate(obj);
1949         case __I915_MADV_PURGED:
1950                 return;
1951         }
1952
1953         if (obj->base.filp == NULL)
1954                 return;
1955
1956         mapping = file_inode(obj->base.filp)->i_mapping;
1957         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
1958 }
1959
1960 static void
1961 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1962 {
1963         struct sg_page_iter sg_iter;
1964         int ret;
1965
1966         BUG_ON(obj->madv == __I915_MADV_PURGED);
1967
1968         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1969         if (ret) {
1970                 /* In the event of a disaster, abandon all caches and
1971                  * hope for the best.
1972                  */
1973                 WARN_ON(ret != -EIO);
1974                 i915_gem_clflush_object(obj, true);
1975                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1976         }
1977
1978         if (i915_gem_object_needs_bit17_swizzle(obj))
1979                 i915_gem_object_save_bit_17_swizzle(obj);
1980
1981         if (obj->madv == I915_MADV_DONTNEED)
1982                 obj->dirty = 0;
1983
1984         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1985                 struct page *page = sg_page_iter_page(&sg_iter);
1986
1987                 if (obj->dirty)
1988                         set_page_dirty(page);
1989
1990                 if (obj->madv == I915_MADV_WILLNEED)
1991                         mark_page_accessed(page);
1992
1993                 page_cache_release(page);
1994         }
1995         obj->dirty = 0;
1996
1997         sg_free_table(obj->pages);
1998         kfree(obj->pages);
1999 }
2000
2001 int
2002 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2003 {
2004         const struct drm_i915_gem_object_ops *ops = obj->ops;
2005
2006         if (obj->pages == NULL)
2007                 return 0;
2008
2009         if (obj->pages_pin_count)
2010                 return -EBUSY;
2011
2012         BUG_ON(i915_gem_obj_bound_any(obj));
2013
2014         /* ->put_pages might need to allocate memory for the bit17 swizzle
2015          * array, hence protect them from being reaped by removing them from gtt
2016          * lists early. */
2017         list_del(&obj->global_list);
2018
2019         ops->put_pages(obj);
2020         obj->pages = NULL;
2021
2022         i915_gem_object_invalidate(obj);
2023
2024         return 0;
2025 }
2026
2027 unsigned long
2028 i915_gem_shrink(struct drm_i915_private *dev_priv,
2029                 long target, unsigned flags)
2030 {
2031         const struct {
2032                 struct list_head *list;
2033                 unsigned int bit;
2034         } phases[] = {
2035                 { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
2036                 { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
2037                 { NULL, 0 },
2038         }, *phase;
2039         unsigned long count = 0;
2040
2041         /*
2042          * As we may completely rewrite the (un)bound list whilst unbinding
2043          * (due to retiring requests) we have to strictly process only
2044          * one element of the list at a time, and recheck the list
2045          * on every iteration.
2046          *
2047          * In particular, we must hold a reference whilst removing the
2048          * object as we may end up waiting for and/or retiring the objects.
2049          * This might release the final reference (held by the active list)
2050          * and result in the object being freed from under us. This is
2051          * similar to the precautions the eviction code must take whilst
2052          * removing objects.
2053          *
2054          * Also note that although these lists do not hold a reference to
2055          * the object we can safely grab one here: The final object
2056          * unreferencing and the bound_list are both protected by the
2057          * dev->struct_mutex and so we won't ever be able to observe an
2058          * object on the bound_list with a reference count equal to 0.
2059          */
2060         for (phase = phases; phase->list; phase++) {
2061                 struct list_head still_in_list;
2062
2063                 if ((flags & phase->bit) == 0)
2064                         continue;
2065
2066                 INIT_LIST_HEAD(&still_in_list);
2067                 while (count < target && !list_empty(phase->list)) {
2068                         struct drm_i915_gem_object *obj;
2069                         struct i915_vma *vma, *v;
2070
2071                         obj = list_first_entry(phase->list,
2072                                                typeof(*obj), global_list);
2073                         list_move_tail(&obj->global_list, &still_in_list);
2074
2075                         if (flags & I915_SHRINK_PURGEABLE &&
2076                             !i915_gem_object_is_purgeable(obj))
2077                                 continue;
2078
2079                         drm_gem_object_reference(&obj->base);
2080
2081                         /* For the unbound phase, this should be a no-op! */
2082                         list_for_each_entry_safe(vma, v,
2083                                                  &obj->vma_list, vma_link)
2084                                 if (i915_vma_unbind(vma))
2085                                         break;
2086
2087                         if (i915_gem_object_put_pages(obj) == 0)
2088                                 count += obj->base.size >> PAGE_SHIFT;
2089
2090                         drm_gem_object_unreference(&obj->base);
2091                 }
2092                 list_splice(&still_in_list, phase->list);
2093         }
2094
2095         return count;
2096 }
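
/*
 * Illustrative caller pattern (a hedged sketch; it mirrors the escalation
 * used by i915_gem_object_create_mmap_offset() above): reap only purgeable
 * objects from both lists first, and widen the scope to everything bound
 * and unbound only if that did not free enough pages.
 *
 *	freed = i915_gem_shrink(dev_priv, nr_pages,
 *				I915_SHRINK_BOUND |
 *				I915_SHRINK_UNBOUND |
 *				I915_SHRINK_PURGEABLE);
 *	if (freed < nr_pages)
 *		freed += i915_gem_shrink(dev_priv, nr_pages - freed,
 *					 I915_SHRINK_BOUND |
 *					 I915_SHRINK_UNBOUND);
 */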
2097
2098 static unsigned long
2099 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
2100 {
2101         i915_gem_evict_everything(dev_priv->dev);
2102         return i915_gem_shrink(dev_priv, LONG_MAX,
2103                                I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
2104 }
2105
2106 static int
2107 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2108 {
2109         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2110         int page_count, i;
2111         struct address_space *mapping;
2112         struct sg_table *st;
2113         struct scatterlist *sg;
2114         struct sg_page_iter sg_iter;
2115         struct page *page;
2116         unsigned long last_pfn = 0;     /* suppress gcc warning */
2117         gfp_t gfp;
2118
2119         /* Assert that the object is not currently in any GPU domain. As it
2120          * wasn't in the GTT, there shouldn't be any way it could have been in
2121          * a GPU cache.
2122          */
2123         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2124         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2125
2126         st = kmalloc(sizeof(*st), GFP_KERNEL);
2127         if (st == NULL)
2128                 return -ENOMEM;
2129
2130         page_count = obj->base.size / PAGE_SIZE;
2131         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2132                 kfree(st);
2133                 return -ENOMEM;
2134         }
2135
2136         /* Get the list of pages out of our struct file.  They'll be pinned
2137          * at this point until we release them.
2138          *
2139          * Fail silently without starting the shrinker
2140          */
2141         mapping = file_inode(obj->base.filp)->i_mapping;
2142         gfp = mapping_gfp_mask(mapping);
2143         gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
2144         gfp &= ~(__GFP_IO | __GFP_WAIT);
2145         sg = st->sgl;
2146         st->nents = 0;
2147         for (i = 0; i < page_count; i++) {
2148                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2149                 if (IS_ERR(page)) {
2150                         i915_gem_shrink(dev_priv,
2151                                         page_count,
2152                                         I915_SHRINK_BOUND |
2153                                         I915_SHRINK_UNBOUND |
2154                                         I915_SHRINK_PURGEABLE);
2155                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2156                 }
2157                 if (IS_ERR(page)) {
2158                         /* We've tried hard to allocate the memory by reaping
2159                          * our own buffer, now let the real VM do its job and
2160                          * go down in flames if truly OOM.
2161                          */
2162                         i915_gem_shrink_all(dev_priv);
2163                         page = shmem_read_mapping_page(mapping, i);
2164                         if (IS_ERR(page))
2165                                 goto err_pages;
2166                 }
2167 #ifdef CONFIG_SWIOTLB
2168                 if (swiotlb_nr_tbl()) {
2169                         st->nents++;
2170                         sg_set_page(sg, page, PAGE_SIZE, 0);
2171                         sg = sg_next(sg);
2172                         continue;
2173                 }
2174 #endif
2175                 if (!i || page_to_pfn(page) != last_pfn + 1) {
2176                         if (i)
2177                                 sg = sg_next(sg);
2178                         st->nents++;
2179                         sg_set_page(sg, page, PAGE_SIZE, 0);
2180                 } else {
2181                         sg->length += PAGE_SIZE;
2182                 }
2183                 last_pfn = page_to_pfn(page);
2184
2185                 /* Check that the i965g/gm workaround works. */
2186                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2187         }
2188 #ifdef CONFIG_SWIOTLB
2189         if (!swiotlb_nr_tbl())
2190 #endif
2191                 sg_mark_end(sg);
2192         obj->pages = st;
2193
2194         if (i915_gem_object_needs_bit17_swizzle(obj))
2195                 i915_gem_object_do_bit_17_swizzle(obj);
2196
2197         if (obj->tiling_mode != I915_TILING_NONE &&
2198             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2199                 i915_gem_object_pin_pages(obj);
2200
2201         return 0;
2202
2203 err_pages:
2204         sg_mark_end(sg);
2205         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
2206                 page_cache_release(sg_page_iter_page(&sg_iter));
2207         sg_free_table(st);
2208         kfree(st);
2209
2210         /* shmemfs first checks if there is enough memory to allocate the page
2211          * and reports ENOSPC should there be insufficient memory, along with the usual
2212          * ENOMEM for a genuine allocation failure.
2213          *
2214          * We use ENOSPC in our driver to mean that we have run out of aperture
2215          * space and so want to translate the error from shmemfs back to our
2216          * usual understanding of ENOMEM.
2217          */
2218         if (PTR_ERR(page) == -ENOSPC)
2219                 return -ENOMEM;
2220         else
2221                 return PTR_ERR(page);
2222 }
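
/*
 * Worked example of the scatterlist coalescing above (illustrative pfns):
 * pages allocated at pfns 100, 101, 102 and 200 end up as two sg entries,
 * the first covering 3 * PAGE_SIZE starting at pfn 100 and the second
 * covering PAGE_SIZE at pfn 200. With swiotlb active every page gets its
 * own entry instead, since bounce buffering cannot assume contiguity.
 */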
2223
2224 /* Ensure that the associated pages are gathered from the backing storage
2225  * and pinned into our object. i915_gem_object_get_pages() may be called
2226  * multiple times before they are released by a single call to
2227  * i915_gem_object_put_pages() - once the pages are no longer referenced
2228  * either as a result of memory pressure (reaping pages under the shrinker)
2229  * or as the object is itself released.
2230  */
2231 int
2232 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2233 {
2234         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2235         const struct drm_i915_gem_object_ops *ops = obj->ops;
2236         int ret;
2237
2238         if (obj->pages)
2239                 return 0;
2240
2241         if (obj->madv != I915_MADV_WILLNEED) {
2242                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2243                 return -EFAULT;
2244         }
2245
2246         BUG_ON(obj->pages_pin_count);
2247
2248         ret = ops->get_pages(obj);
2249         if (ret)
2250                 return ret;
2251
2252         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2253         return 0;
2254 }
2255
2256 static void
2257 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2258                                struct intel_engine_cs *ring)
2259 {
2260         u32 seqno = intel_ring_get_seqno(ring);
2261
2262         BUG_ON(ring == NULL);
2263         if (obj->ring != ring && obj->last_write_seqno) {
2264                 /* Keep the seqno relative to the current ring */
2265                 obj->last_write_seqno = seqno;
2266         }
2267         obj->ring = ring;
2268
2269         /* Add a reference if we're newly entering the active list. */
2270         if (!obj->active) {
2271                 drm_gem_object_reference(&obj->base);
2272                 obj->active = 1;
2273         }
2274
2275         list_move_tail(&obj->ring_list, &ring->active_list);
2276
2277         obj->last_read_seqno = seqno;
2278 }
2279
2280 void i915_vma_move_to_active(struct i915_vma *vma,
2281                              struct intel_engine_cs *ring)
2282 {
2283         list_move_tail(&vma->mm_list, &vma->vm->active_list);
2284         return i915_gem_object_move_to_active(vma->obj, ring);
2285 }
2286
2287 static void
2288 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2289 {
2290         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2291         struct i915_address_space *vm;
2292         struct i915_vma *vma;
2293
2294         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2295         BUG_ON(!obj->active);
2296
2297         list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2298                 vma = i915_gem_obj_to_vma(obj, vm);
2299                 if (vma && !list_empty(&vma->mm_list))
2300                         list_move_tail(&vma->mm_list, &vm->inactive_list);
2301         }
2302
2303         intel_fb_obj_flush(obj, true);
2304
2305         list_del_init(&obj->ring_list);
2306         obj->ring = NULL;
2307
2308         obj->last_read_seqno = 0;
2309         obj->last_write_seqno = 0;
2310         obj->base.write_domain = 0;
2311
2312         obj->last_fenced_seqno = 0;
2313
2314         obj->active = 0;
2315         drm_gem_object_unreference(&obj->base);
2316
2317         WARN_ON(i915_verify_lists(dev));
2318 }
2319
2320 static void
2321 i915_gem_object_retire(struct drm_i915_gem_object *obj)
2322 {
2323         struct intel_engine_cs *ring = obj->ring;
2324
2325         if (ring == NULL)
2326                 return;
2327
2328         if (i915_seqno_passed(ring->get_seqno(ring, true),
2329                               obj->last_read_seqno))
2330                 i915_gem_object_move_to_inactive(obj);
2331 }
2332
2333 static int
2334 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2335 {
2336         struct drm_i915_private *dev_priv = dev->dev_private;
2337         struct intel_engine_cs *ring;
2338         int ret, i, j;
2339
2340         /* Carefully retire all requests without writing to the rings */
2341         for_each_ring(ring, dev_priv, i) {
2342                 ret = intel_ring_idle(ring);
2343                 if (ret)
2344                         return ret;
2345         }
2346         i915_gem_retire_requests(dev);
2347
2348         /* Finally reset hw state */
2349         for_each_ring(ring, dev_priv, i) {
2350                 intel_ring_init_seqno(ring, seqno);
2351
2352                 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
2353                         ring->semaphore.sync_seqno[j] = 0;
2354         }
2355
2356         return 0;
2357 }
2358
2359 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2360 {
2361         struct drm_i915_private *dev_priv = dev->dev_private;
2362         int ret;
2363
2364         if (seqno == 0)
2365                 return -EINVAL;
2366
2367         /* The HWS page needs to be set to a value less than
2368          * the one we will inject into the ring
2369          */
2370         ret = i915_gem_init_seqno(dev, seqno - 1);
2371         if (ret)
2372                 return ret;
2373
2374         /* Carefully set the last_seqno value so that wrap
2375          * detection still works
2376          */
2377         dev_priv->next_seqno = seqno;
2378         dev_priv->last_seqno = seqno - 1;
2379         if (dev_priv->last_seqno == 0)
2380                 dev_priv->last_seqno--;
2381
2382         return 0;
2383 }
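
/*
 * Worked example (illustrative value): i915_gem_set_seqno(dev, 0x100) idles
 * the rings, writes 0xff into the hardware status pages via
 * i915_gem_init_seqno(), and leaves next_seqno == 0x100 and
 * last_seqno == 0xff. Requesting seqno == 1 would make last_seqno wrap to
 * 0xffffffff so that wrap detection keeps working, as noted above.
 */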
2384
2385 int
2386 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2387 {
2388         struct drm_i915_private *dev_priv = dev->dev_private;
2389
2390         /* reserve 0 for non-seqno */
2391         if (dev_priv->next_seqno == 0) {
2392                 int ret = i915_gem_init_seqno(dev, 0);
2393                 if (ret)
2394                         return ret;
2395
2396                 dev_priv->next_seqno = 1;
2397         }
2398
2399         *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2400         return 0;
2401 }
2402
2403 int __i915_add_request(struct intel_engine_cs *ring,
2404                        struct drm_file *file,
2405                        struct drm_i915_gem_object *obj,
2406                        u32 *out_seqno)
2407 {
2408         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2409         struct drm_i915_gem_request *request;
2410         struct intel_ringbuffer *ringbuf;
2411         u32 request_ring_position, request_start;
2412         int ret;
2413
2414         request = ring->preallocated_lazy_request;
2415         if (WARN_ON(request == NULL))
2416                 return -ENOMEM;
2417
2418         if (i915.enable_execlists) {
2419                 struct intel_context *ctx = request->ctx;
2420                 ringbuf = ctx->engine[ring->id].ringbuf;
2421         } else
2422                 ringbuf = ring->buffer;
2423
2424         request_start = intel_ring_get_tail(ringbuf);
2425         /*
2426          * Emit any outstanding flushes - execbuf can fail to emit the flush
2427          * after having emitted the batchbuffer command. Hence we need to fix
2428          * things up similar to emitting the lazy request. The difference here
2429          * is that the flush _must_ happen before the next request, no matter
2430          * what.
2431          */
2432         if (i915.enable_execlists) {
2433                 ret = logical_ring_flush_all_caches(ringbuf);
2434                 if (ret)
2435                         return ret;
2436         } else {
2437                 ret = intel_ring_flush_all_caches(ring);
2438                 if (ret)
2439                         return ret;
2440         }
2441
2442         /* Record the position of the start of the request so that
2443          * should we detect the updated seqno part-way through the
2444          * GPU processing the request, we never over-estimate the
2445          * position of the head.
2446          */
2447         request_ring_position = intel_ring_get_tail(ringbuf);
2448
2449         if (i915.enable_execlists) {
2450                 ret = ring->emit_request(ringbuf);
2451                 if (ret)
2452                         return ret;
2453         } else {
2454                 ret = ring->add_request(ring);
2455                 if (ret)
2456                         return ret;
2457         }
2458
2459         request->seqno = intel_ring_get_seqno(ring);
2460         request->ring = ring;
2461         request->head = request_start;
2462         request->tail = request_ring_position;
2463
2464         /* Whilst this request exists, batch_obj will be on the
2465          * active_list, and so will hold the active reference. Only when this
2466          * request is retired will the batch_obj be moved onto the
2467          * inactive_list and lose its active reference. Hence we do not need
2468          * to explicitly hold another reference here.
2469          */
2470         request->batch_obj = obj;
2471
2472         if (!i915.enable_execlists) {
2473                 /* Hold a reference to the current context so that we can inspect
2474                  * it later in case a hangcheck error event fires.
2475                  */
2476                 request->ctx = ring->last_context;
2477                 if (request->ctx)
2478                         i915_gem_context_reference(request->ctx);
2479         }
2480
2481         request->emitted_jiffies = jiffies;
2482         list_add_tail(&request->list, &ring->request_list);
2483         request->file_priv = NULL;
2484
2485         if (file) {
2486                 struct drm_i915_file_private *file_priv = file->driver_priv;
2487
2488                 spin_lock(&file_priv->mm.lock);
2489                 request->file_priv = file_priv;
2490                 list_add_tail(&request->client_list,
2491                               &file_priv->mm.request_list);
2492                 spin_unlock(&file_priv->mm.lock);
2493         }
2494
2495         trace_i915_gem_request_add(ring, request->seqno);
2496         ring->outstanding_lazy_seqno = 0;
2497         ring->preallocated_lazy_request = NULL;
2498
2499         i915_queue_hangcheck(ring->dev);
2500
2501         cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2502         queue_delayed_work(dev_priv->wq,
2503                            &dev_priv->mm.retire_work,
2504                            round_jiffies_up_relative(HZ));
2505         intel_mark_busy(dev_priv->dev);
2506
2507         if (out_seqno)
2508                 *out_seqno = request->seqno;
2509         return 0;
2510 }
2511
2512 static inline void
2513 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2514 {
2515         struct drm_i915_file_private *file_priv = request->file_priv;
2516
2517         if (!file_priv)
2518                 return;
2519
2520         spin_lock(&file_priv->mm.lock);
2521         list_del(&request->client_list);
2522         request->file_priv = NULL;
2523         spin_unlock(&file_priv->mm.lock);
2524 }
2525
2526 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2527                                    const struct intel_context *ctx)
2528 {
2529         unsigned long elapsed;
2530
2531         elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2532
2533         if (ctx->hang_stats.banned)
2534                 return true;
2535
2536         if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
2537                 if (!i915_gem_context_is_default(ctx)) {
2538                         DRM_DEBUG("context hanging too fast, banning!\n");
2539                         return true;
2540                 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2541                         if (i915_stop_ring_allow_warn(dev_priv))
2542                                 DRM_ERROR("gpu hanging too fast, banning!\n");
2543                         return true;
2544                 }
2545         }
2546
2547         return false;
2548 }
2549
2550 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2551                                   struct intel_context *ctx,
2552                                   const bool guilty)
2553 {
2554         struct i915_ctx_hang_stats *hs;
2555
2556         if (WARN_ON(!ctx))
2557                 return;
2558
2559         hs = &ctx->hang_stats;
2560
2561         if (guilty) {
2562                 hs->banned = i915_context_is_banned(dev_priv, ctx);
2563                 hs->batch_active++;
2564                 hs->guilty_ts = get_seconds();
2565         } else {
2566                 hs->batch_pending++;
2567         }
2568 }
2569
2570 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2571 {
2572         list_del(&request->list);
2573         i915_gem_request_remove_from_client(request);
2574
2575         i915_gem_request_unreference(request);
2576 }
2577
2578 void i915_gem_request_free(struct kref *req_ref)
2579 {
2580         struct drm_i915_gem_request *req = container_of(req_ref,
2581                                                  typeof(*req), ref);
2582         struct intel_context *ctx = req->ctx;
2583
2584         if (ctx) {
2585                 if (i915.enable_execlists) {
2586                         struct intel_engine_cs *ring = req->ring;
2587
2588                         if (ctx != ring->default_context)
2589                                 intel_lr_context_unpin(ring, ctx);
2590                 }
2591
2592                 i915_gem_context_unreference(ctx);
2593         }
2594
2595         kfree(req);
2596 }
2597
2598 struct drm_i915_gem_request *
2599 i915_gem_find_active_request(struct intel_engine_cs *ring)
2600 {
2601         struct drm_i915_gem_request *request;
2602         u32 completed_seqno;
2603
2604         completed_seqno = ring->get_seqno(ring, false);
2605
2606         list_for_each_entry(request, &ring->request_list, list) {
2607                 if (i915_seqno_passed(completed_seqno, request->seqno))
2608                         continue;
2609
2610                 return request;
2611         }
2612
2613         return NULL;
2614 }
2615
2616 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2617                                        struct intel_engine_cs *ring)
2618 {
2619         struct drm_i915_gem_request *request;
2620         bool ring_hung;
2621
2622         request = i915_gem_find_active_request(ring);
2623
2624         if (request == NULL)
2625                 return;
2626
2627         ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2628
2629         i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2630
2631         list_for_each_entry_continue(request, &ring->request_list, list)
2632                 i915_set_reset_status(dev_priv, request->ctx, false);
2633 }
2634
2635 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2636                                         struct intel_engine_cs *ring)
2637 {
2638         while (!list_empty(&ring->active_list)) {
2639                 struct drm_i915_gem_object *obj;
2640
2641                 obj = list_first_entry(&ring->active_list,
2642                                        struct drm_i915_gem_object,
2643                                        ring_list);
2644
2645                 i915_gem_object_move_to_inactive(obj);
2646         }
2647
2648         /*
2649          * Clear the execlists queue up before freeing the requests, as those
2650          * are the ones that keep the context and ringbuffer backing objects
2651          * pinned in place.
2652          */
2653         while (!list_empty(&ring->execlist_queue)) {
2654                 struct intel_ctx_submit_request *submit_req;
2655
2656                 submit_req = list_first_entry(&ring->execlist_queue,
2657                                 struct intel_ctx_submit_request,
2658                                 execlist_link);
2659                 list_del(&submit_req->execlist_link);
2660                 intel_runtime_pm_put(dev_priv);
2661                 i915_gem_context_unreference(submit_req->ctx);
2662                 kfree(submit_req);
2663         }
2664
2665         /*
2666          * We must free the requests after all the corresponding objects have
2667          * been moved off active lists. Which is the same order as the normal
2668          * retire_requests function does. This is important if objects hold
2669          * implicit references on things like e.g. ppgtt address spaces through
2670          * the request.
2671          */
2672         while (!list_empty(&ring->request_list)) {
2673                 struct drm_i915_gem_request *request;
2674
2675                 request = list_first_entry(&ring->request_list,
2676                                            struct drm_i915_gem_request,
2677                                            list);
2678
2679                 i915_gem_free_request(request);
2680         }
2681
2682         /* These may not have been flushed before the reset, so do it now */
2683         i915_gem_request_assign(&ring->preallocated_lazy_request, NULL);
2684         ring->outstanding_lazy_seqno = 0;
2685 }
2686
2687 void i915_gem_restore_fences(struct drm_device *dev)
2688 {
2689         struct drm_i915_private *dev_priv = dev->dev_private;
2690         int i;
2691
2692         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2693                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2694
2695                 /*
2696                  * Commit delayed tiling changes if we have an object still
2697                  * attached to the fence, otherwise just clear the fence.
2698                  */
2699                 if (reg->obj) {
2700                         i915_gem_object_update_fence(reg->obj, reg,
2701                                                      reg->obj->tiling_mode);
2702                 } else {
2703                         i915_gem_write_fence(dev, i, NULL);
2704                 }
2705         }
2706 }
2707
2708 void i915_gem_reset(struct drm_device *dev)
2709 {
2710         struct drm_i915_private *dev_priv = dev->dev_private;
2711         struct intel_engine_cs *ring;
2712         int i;
2713
2714         /*
2715          * Before we free the objects from the requests, we need to inspect
2716          * them to find the guilty party. As the requests only borrow
2717          * their reference to the objects, the inspection must be done first.
2718          */
2719         for_each_ring(ring, dev_priv, i)
2720                 i915_gem_reset_ring_status(dev_priv, ring);
2721
2722         for_each_ring(ring, dev_priv, i)
2723                 i915_gem_reset_ring_cleanup(dev_priv, ring);
2724
2725         i915_gem_context_reset(dev);
2726
2727         i915_gem_restore_fences(dev);
2728 }
2729
2730 /**
2731  * This function clears the request list as sequence numbers are passed.
2732  */
2733 void
2734 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2735 {
2736         uint32_t seqno;
2737
2738         if (list_empty(&ring->request_list))
2739                 return;
2740
2741         WARN_ON(i915_verify_lists(ring->dev));
2742
2743         seqno = ring->get_seqno(ring, true);
2744
2745         /* Move any buffers on the active list that are no longer referenced
2746          * by the ringbuffer to the flushing/inactive lists as appropriate,
2747          * before we free the context associated with the requests.
2748          */
2749         while (!list_empty(&ring->active_list)) {
2750                 struct drm_i915_gem_object *obj;
2751
2752                 obj = list_first_entry(&ring->active_list,
2753                                       struct drm_i915_gem_object,
2754                                       ring_list);
2755
2756                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2757                         break;
2758
2759                 i915_gem_object_move_to_inactive(obj);
2760         }
2761
2762
2763         while (!list_empty(&ring->request_list)) {
2764                 struct drm_i915_gem_request *request;
2765                 struct intel_ringbuffer *ringbuf;
2766
2767                 request = list_first_entry(&ring->request_list,
2768                                            struct drm_i915_gem_request,
2769                                            list);
2770
2771                 if (!i915_seqno_passed(seqno, request->seqno))
2772                         break;
2773
2774                 trace_i915_gem_request_retire(ring, request->seqno);
2775
2776                 /* This is one of the few common intersection points
2777                  * between legacy ringbuffer submission and execlists:
2778                  * we need to tell them apart in order to find the correct
2779          * ringbuffer to which the request belongs.
2780                  */
2781                 if (i915.enable_execlists) {
2782                         struct intel_context *ctx = request->ctx;
2783                         ringbuf = ctx->engine[ring->id].ringbuf;
2784                 } else
2785                         ringbuf = ring->buffer;
2786
2787                 /* We know the GPU must have read the request to have
2788                  * sent us the seqno + interrupt, so use the position
2789          * of the tail of the request to update the last known position
2790                  * of the GPU head.
2791                  */
2792                 ringbuf->last_retired_head = request->tail;
2793
2794                 i915_gem_free_request(request);
2795         }
2796
2797         if (unlikely(ring->trace_irq_seqno &&
2798                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2799                 ring->irq_put(ring);
2800                 ring->trace_irq_seqno = 0;
2801         }
2802
2803         WARN_ON(i915_verify_lists(ring->dev));
2804 }
2805
2806 bool
2807 i915_gem_retire_requests(struct drm_device *dev)
2808 {
2809         struct drm_i915_private *dev_priv = dev->dev_private;
2810         struct intel_engine_cs *ring;
2811         bool idle = true;
2812         int i;
2813
2814         for_each_ring(ring, dev_priv, i) {
2815                 i915_gem_retire_requests_ring(ring);
2816                 idle &= list_empty(&ring->request_list);
2817                 if (i915.enable_execlists) {
2818                         unsigned long flags;
2819
2820                         spin_lock_irqsave(&ring->execlist_lock, flags);
2821                         idle &= list_empty(&ring->execlist_queue);
2822                         spin_unlock_irqrestore(&ring->execlist_lock, flags);
2823
2824                         intel_execlists_retire_requests(ring);
2825                 }
2826         }
2827
2828         if (idle)
2829                 mod_delayed_work(dev_priv->wq,
2830                                    &dev_priv->mm.idle_work,
2831                                    msecs_to_jiffies(100));
2832
2833         return idle;
2834 }
2835
2836 static void
2837 i915_gem_retire_work_handler(struct work_struct *work)
2838 {
2839         struct drm_i915_private *dev_priv =
2840                 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2841         struct drm_device *dev = dev_priv->dev;
2842         bool idle;
2843
2844         /* Come back later if the device is busy... */
2845         idle = false;
2846         if (mutex_trylock(&dev->struct_mutex)) {
2847                 idle = i915_gem_retire_requests(dev);
2848                 mutex_unlock(&dev->struct_mutex);
2849         }
2850         if (!idle)
2851                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2852                                    round_jiffies_up_relative(HZ));
2853 }
2854
2855 static void
2856 i915_gem_idle_work_handler(struct work_struct *work)
2857 {
2858         struct drm_i915_private *dev_priv =
2859                 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2860
2861         intel_mark_idle(dev_priv->dev);
2862 }
2863
2864 /**
2865  * Ensures that an object will eventually get non-busy by flushing any required
2866  * write domains, emitting any outstanding lazy request and retiring any
2867  * completed requests.
2868  */
2869 static int
2870 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2871 {
2872         int ret;
2873
2874         if (obj->active) {
2875                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2876                 if (ret)
2877                         return ret;
2878
2879                 i915_gem_retire_requests_ring(obj->ring);
2880         }
2881
2882         return 0;
2883 }
2884
2885 /**
2886  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2887  * @DRM_IOCTL_ARGS: standard ioctl arguments
2888  *
2889  * Returns 0 if successful, else an error is returned with the remaining time in
2890  * the timeout parameter.
2891  *  -ETIME: object is still busy after timeout
2892  *  -ERESTARTSYS: signal interrupted the wait
2893  *  -ENOENT: object doesn't exist
2894  * Also possible, but rare:
2895  *  -EAGAIN: GPU wedged
2896  *  -ENOMEM: damn
2897  *  -ENODEV: Internal IRQ fail
2898  *  -E?: The add request failed
2899  *
2900  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2901  * non-zero timeout parameter the wait ioctl will wait for the given number of
2902  * nanoseconds on an object becoming unbusy. Since the wait itself is done
2903  * without holding struct_mutex, the object may become re-busied before this
2904  * function completes. A similar but shorter race condition exists in the
2905  * busy ioctl.
2906  */
2907 int
2908 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2909 {
2910         struct drm_i915_private *dev_priv = dev->dev_private;
2911         struct drm_i915_gem_wait *args = data;
2912         struct drm_i915_gem_object *obj;
2913         struct intel_engine_cs *ring = NULL;
2914         unsigned reset_counter;
2915         u32 seqno = 0;
2916         int ret = 0;
2917
2918         if (args->flags != 0)
2919                 return -EINVAL;
2920
2921         ret = i915_mutex_lock_interruptible(dev);
2922         if (ret)
2923                 return ret;
2924
2925         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2926         if (&obj->base == NULL) {
2927                 mutex_unlock(&dev->struct_mutex);
2928                 return -ENOENT;
2929         }
2930
2931         /* Need to make sure the object gets inactive eventually. */
2932         ret = i915_gem_object_flush_active(obj);
2933         if (ret)
2934                 goto out;
2935
2936         if (obj->active) {
2937                 seqno = obj->last_read_seqno;
2938                 ring = obj->ring;
2939         }
2940
2941         if (seqno == 0)
2942                  goto out;
2943
2944         /* Do this after OLR check to make sure we make forward progress polling
2945          * on this IOCTL with a timeout <=0 (like busy ioctl)
2946          */
2947         if (args->timeout_ns <= 0) {
2948                 ret = -ETIME;
2949                 goto out;
2950         }
2951
2952         drm_gem_object_unreference(&obj->base);
2953         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2954         mutex_unlock(&dev->struct_mutex);
2955
2956         return __i915_wait_seqno(ring, seqno, reset_counter, true,
2957                                  &args->timeout_ns, file->driver_priv);
2958
2959 out:
2960         drm_gem_object_unreference(&obj->base);
2961         mutex_unlock(&dev->struct_mutex);
2962         return ret;
2963 }
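
/*
 * Illustrative userspace usage (a hedged sketch, not part of the driver;
 * fd and handle assumed from earlier setup): wait up to 1ms for the buffer
 * to go idle; a timeout_ns of 0 merely polls busyness as described in the
 * comment above, and on -ETIME the remaining time is left in timeout_ns.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000,
 *	};
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 */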
2964
2965 /**
2966  * i915_gem_object_sync - sync an object to a ring.
2967  *
2968  * @obj: object which may be in use on another ring.
2969  * @to: ring we wish to use the object on. May be NULL.
2970  *
2971  * This code is meant to abstract object synchronization with the GPU.
2972  * Calling with NULL implies synchronizing the object with the CPU
2973  * rather than a particular GPU ring.
2974  *
2975  * Returns 0 if successful, else propagates up the lower layer error.
2976  */
2977 int
2978 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2979                      struct intel_engine_cs *to)
2980 {
2981         struct intel_engine_cs *from = obj->ring;
2982         u32 seqno;
2983         int ret, idx;
2984
2985         if (from == NULL || to == from)
2986                 return 0;
2987
2988         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2989                 return i915_gem_object_wait_rendering(obj, false);
2990
2991         idx = intel_ring_sync_index(from, to);
2992
2993         seqno = obj->last_read_seqno;
2994         /* Optimization: Avoid semaphore sync when we are sure we already
2995          * waited for an object with a higher seqno */
2996         if (seqno <= from->semaphore.sync_seqno[idx])
2997                 return 0;
2998
2999         ret = i915_gem_check_olr(obj->ring, seqno);
3000         if (ret)
3001                 return ret;
3002
3003         trace_i915_gem_ring_sync_to(from, to, seqno);
3004         ret = to->semaphore.sync_to(to, from, seqno);
3005         if (!ret)
3006                 /* We use last_read_seqno because sync_to()
3007                  * might have just caused seqno wrap under
3008                  * the radar.
3009                  */
3010                 from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
3011
3012         return ret;
3013 }
3014
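/*
 * Example: a sketch of how a caller might serialise a set of objects to the
 * ring it is about to emit commands on, in the style of the execbuffer
 * reservation path. The object list and ring are assumed to be supplied by
 * the caller and struct_mutex to be held; illustration only.
 *
 *   static int sync_objects_to_ring(struct list_head *objects,
 *                                   struct intel_engine_cs *ring)
 *   {
 *           struct drm_i915_gem_object *obj;
 *           int ret;
 *
 *           list_for_each_entry(obj, objects, obj_exec_link) {
 *                   // Waits, or emits a semaphore, if obj was last used on
 *                   // a different ring; a NULL ring would sync to the CPU.
 *                   ret = i915_gem_object_sync(obj, ring);
 *                   if (ret)
 *                           return ret;
 *           }
 *
 *           return 0;
 *   }
 */
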
3015 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
3016 {
3017         u32 old_write_domain, old_read_domains;
3018
3019         /* Force a pagefault for domain tracking on next user access */
3020         i915_gem_release_mmap(obj);
3021
3022         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3023                 return;
3024
3025         /* Wait for any direct GTT access to complete */
3026         mb();
3027
3028         old_read_domains = obj->base.read_domains;
3029         old_write_domain = obj->base.write_domain;
3030
3031         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
3032         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
3033
3034         trace_i915_gem_object_change_domain(obj,
3035                                             old_read_domains,
3036                                             old_write_domain);
3037 }
3038
3039 int i915_vma_unbind(struct i915_vma *vma)
3040 {
3041         struct drm_i915_gem_object *obj = vma->obj;
3042         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3043         int ret;
3044
3045         if (list_empty(&vma->vma_link))
3046                 return 0;
3047
3048         if (!drm_mm_node_allocated(&vma->node)) {
3049                 i915_gem_vma_destroy(vma);
3050                 return 0;
3051         }
3052
3053         if (vma->pin_count)
3054                 return -EBUSY;
3055
3056         BUG_ON(obj->pages == NULL);
3057
3058         ret = i915_gem_object_finish_gpu(obj);
3059         if (ret)
3060                 return ret;
3061         /* Continue on if we fail due to EIO, the GPU is hung so we
3062          * should be safe and we need to cleanup or else we might
3063          * cause memory corruption through use-after-free.
3064          */
3065
3066         /* Throw away the active reference before moving to the unbound list */
3067         i915_gem_object_retire(obj);
3068
3069         if (i915_is_ggtt(vma->vm)) {
3070                 i915_gem_object_finish_gtt(obj);
3071
3072                 /* release the fence reg _after_ flushing */
3073                 ret = i915_gem_object_put_fence(obj);
3074                 if (ret)
3075                         return ret;
3076         }
3077
3078         trace_i915_vma_unbind(vma);
3079
3080         vma->unbind_vma(vma);
3081
3082         list_del_init(&vma->mm_list);
3083         if (i915_is_ggtt(vma->vm))
3084                 obj->map_and_fenceable = false;
3085
3086         drm_mm_remove_node(&vma->node);
3087         i915_gem_vma_destroy(vma);
3088
3089         /* Since the unbound list is global, only move to that list if
3090          * no more VMAs exist. */
3091         if (list_empty(&obj->vma_list)) {
3092                 i915_gem_gtt_finish_object(obj);
3093                 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
3094         }
3095
3096         /* And finally now the object is completely decoupled from this vma,
3097          * we can drop its hold on the backing storage and allow it to be
3098          * reaped by the shrinker.
3099          */
3100         i915_gem_object_unpin_pages(obj);
3101
3102         return 0;
3103 }
3104
3105 int i915_gpu_idle(struct drm_device *dev)
3106 {
3107         struct drm_i915_private *dev_priv = dev->dev_private;
3108         struct intel_engine_cs *ring;
3109         int ret, i;
3110
3111         /* Flush everything onto the inactive list. */
3112         for_each_ring(ring, dev_priv, i) {
3113                 if (!i915.enable_execlists) {
3114                         ret = i915_switch_context(ring, ring->default_context);
3115                         if (ret)
3116                                 return ret;
3117                 }
3118
3119                 ret = intel_ring_idle(ring);
3120                 if (ret)
3121                         return ret;
3122         }
3123
3124         return 0;
3125 }
3126
3127 static void i965_write_fence_reg(struct drm_device *dev, int reg,
3128                                  struct drm_i915_gem_object *obj)
3129 {
3130         struct drm_i915_private *dev_priv = dev->dev_private;
3131         int fence_reg;
3132         int fence_pitch_shift;
3133
3134         if (INTEL_INFO(dev)->gen >= 6) {
3135                 fence_reg = FENCE_REG_SANDYBRIDGE_0;
3136                 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
3137         } else {
3138                 fence_reg = FENCE_REG_965_0;
3139                 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
3140         }
3141
3142         fence_reg += reg * 8;
3143
3144         /* To w/a incoherency with non-atomic 64-bit register updates,
3145          * we split the 64-bit update into two 32-bit writes. In order
3146          * for a partial fence not to be evaluated between writes, we
3147          * precede the update with write to turn off the fence register,
3148          * and only enable the fence as the last step.
3149          *
3150          * For extra levels of paranoia, we make sure each step lands
3151          * before applying the next step.
3152          */
3153         I915_WRITE(fence_reg, 0);
3154         POSTING_READ(fence_reg);
3155
3156         if (obj) {
3157                 u32 size = i915_gem_obj_ggtt_size(obj);
3158                 uint64_t val;
3159
3160                 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
3161                                  0xfffff000) << 32;
3162                 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
3163                 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
3164                 if (obj->tiling_mode == I915_TILING_Y)
3165                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
3166                 val |= I965_FENCE_REG_VALID;
3167
3168                 I915_WRITE(fence_reg + 4, val >> 32);
3169                 POSTING_READ(fence_reg + 4);
3170
3171                 I915_WRITE(fence_reg + 0, val);
3172                 POSTING_READ(fence_reg);
3173         } else {
3174                 I915_WRITE(fence_reg + 4, 0);
3175                 POSTING_READ(fence_reg + 4);
3176         }
3177 }
3178
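/*
 * Worked example (illustrative numbers): on gen4, an X-tiled object at GGTT
 * offset 0x00100000 with size 0x00200000 and a 4096-byte stride encodes as
 *
 *   upper dword = (0x00100000 + 0x00200000 - 4096) & 0xfffff000 = 0x002ff000
 *   lower dword = 0x00100000                  // start address
 *               | ((4096 / 128) - 1) << 2     // pitch, I965_FENCE_PITCH_SHIFT
 *               | I965_FENCE_REG_VALID        // bit 0
 *               = 0x0010007d
 *
 * so the sequence above writes 0 to disable the fence, then 0x002ff000 to
 * fence_reg + 4, then 0x0010007d to fence_reg + 0 to re-enable it. On gen6+
 * only the register base and the pitch field position (bit 32) differ.
 */
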
3179 static void i915_write_fence_reg(struct drm_device *dev, int reg,
3180                                  struct drm_i915_gem_object *obj)
3181 {
3182         struct drm_i915_private *dev_priv = dev->dev_private;
3183         u32 val;
3184
3185         if (obj) {
3186                 u32 size = i915_gem_obj_ggtt_size(obj);
3187                 int pitch_val;
3188                 int tile_width;
3189
3190                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
3191                      (size & -size) != size ||
3192                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3193                      "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
3194                      i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
3195
3196                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
3197                         tile_width = 128;
3198                 else
3199                         tile_width = 512;
3200
3201                 /* Note: pitch better be a power of two tile widths */
3202                 pitch_val = obj->stride / tile_width;
3203                 pitch_val = ffs(pitch_val) - 1;
3204
3205                 val = i915_gem_obj_ggtt_offset(obj);
3206                 if (obj->tiling_mode == I915_TILING_Y)
3207                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3208                 val |= I915_FENCE_SIZE_BITS(size);
3209                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3210                 val |= I830_FENCE_REG_VALID;
3211         } else
3212                 val = 0;
3213
3214         if (reg < 8)
3215                 reg = FENCE_REG_830_0 + reg * 4;
3216         else
3217                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
3218
3219         I915_WRITE(reg, val);
3220         POSTING_READ(reg);
3221 }
3222
3223 static void i830_write_fence_reg(struct drm_device *dev, int reg,
3224                                 struct drm_i915_gem_object *obj)
3225 {
3226         struct drm_i915_private *dev_priv = dev->dev_private;
3227         uint32_t val;
3228
3229         if (obj) {
3230                 u32 size = i915_gem_obj_ggtt_size(obj);
3231                 uint32_t pitch_val;
3232
3233                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
3234                      (size & -size) != size ||
3235                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3236                      "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
3237                      i915_gem_obj_ggtt_offset(obj), size);
3238
3239                 pitch_val = obj->stride / 128;
3240                 pitch_val = ffs(pitch_val) - 1;
3241
3242                 val = i915_gem_obj_ggtt_offset(obj);
3243                 if (obj->tiling_mode == I915_TILING_Y)
3244                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3245                 val |= I830_FENCE_SIZE_BITS(size);
3246                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3247                 val |= I830_FENCE_REG_VALID;
3248         } else
3249                 val = 0;
3250
3251         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
3252         POSTING_READ(FENCE_REG_830_0 + reg * 4);
3253 }
3254
3255 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
3256 {
3257         return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
3258 }
3259
3260 static void i915_gem_write_fence(struct drm_device *dev, int reg,
3261                                  struct drm_i915_gem_object *obj)
3262 {
3263         struct drm_i915_private *dev_priv = dev->dev_private;
3264
3265         /* Ensure that all CPU reads are completed before installing a fence
3266          * and all writes before removing the fence.
3267          */
3268         if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
3269                 mb();
3270
3271         WARN(obj && (!obj->stride || !obj->tiling_mode),
3272              "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
3273              obj->stride, obj->tiling_mode);
3274
3275         switch (INTEL_INFO(dev)->gen) {
3276         case 9:
3277         case 8:
3278         case 7:
3279         case 6:
3280         case 5:
3281         case 4: i965_write_fence_reg(dev, reg, obj); break;
3282         case 3: i915_write_fence_reg(dev, reg, obj); break;
3283         case 2: i830_write_fence_reg(dev, reg, obj); break;
3284         default: BUG();
3285         }
3286
3287         /* And similarly be paranoid that no direct access to this region
3288          * is reordered to before the fence is installed.
3289          */
3290         if (i915_gem_object_needs_mb(obj))
3291                 mb();
3292 }
3293
3294 static inline int fence_number(struct drm_i915_private *dev_priv,
3295                                struct drm_i915_fence_reg *fence)
3296 {
3297         return fence - dev_priv->fence_regs;
3298 }
3299
3300 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
3301                                          struct drm_i915_fence_reg *fence,
3302                                          bool enable)
3303 {
3304         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3305         int reg = fence_number(dev_priv, fence);
3306
3307         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
3308
3309         if (enable) {
3310                 obj->fence_reg = reg;
3311                 fence->obj = obj;
3312                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3313         } else {
3314                 obj->fence_reg = I915_FENCE_REG_NONE;
3315                 fence->obj = NULL;
3316                 list_del_init(&fence->lru_list);
3317         }
3318         obj->fence_dirty = false;
3319 }
3320
3321 static int
3322 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3323 {
3324         if (obj->last_fenced_seqno) {
3325                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
3326                 if (ret)
3327                         return ret;
3328
3329                 obj->last_fenced_seqno = 0;
3330         }
3331
3332         return 0;
3333 }
3334
3335 int
3336 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3337 {
3338         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3339         struct drm_i915_fence_reg *fence;
3340         int ret;
3341
3342         ret = i915_gem_object_wait_fence(obj);
3343         if (ret)
3344                 return ret;
3345
3346         if (obj->fence_reg == I915_FENCE_REG_NONE)
3347                 return 0;
3348
3349         fence = &dev_priv->fence_regs[obj->fence_reg];
3350
3351         if (WARN_ON(fence->pin_count))
3352                 return -EBUSY;
3353
3354         i915_gem_object_fence_lost(obj);
3355         i915_gem_object_update_fence(obj, fence, false);
3356
3357         return 0;
3358 }
3359
3360 static struct drm_i915_fence_reg *
3361 i915_find_fence_reg(struct drm_device *dev)
3362 {
3363         struct drm_i915_private *dev_priv = dev->dev_private;
3364         struct drm_i915_fence_reg *reg, *avail;
3365         int i;
3366
3367         /* First try to find a free reg */
3368         avail = NULL;
3369         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3370                 reg = &dev_priv->fence_regs[i];
3371                 if (!reg->obj)
3372                         return reg;
3373
3374                 if (!reg->pin_count)
3375                         avail = reg;
3376         }
3377
3378         if (avail == NULL)
3379                 goto deadlock;
3380
3381         /* None available, try to steal one or wait for a user to finish */
3382         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3383                 if (reg->pin_count)
3384                         continue;
3385
3386                 return reg;
3387         }
3388
3389 deadlock:
3390         /* Wait for completion of pending flips which consume fences */
3391         if (intel_has_pending_fb_unpin(dev))
3392                 return ERR_PTR(-EAGAIN);
3393
3394         return ERR_PTR(-EDEADLK);
3395 }
3396
3397 /**
3398  * i915_gem_object_get_fence - set up fencing for an object
3399  * @obj: object to map through a fence reg
3400  *
3401  * When mapping objects through the GTT, userspace wants to be able to write
3402  * to them without having to worry about swizzling if the object is tiled.
3403  * This function walks the fence regs looking for a free one for @obj,
3404  * stealing one if it can't find any.
3405  *
3406  * It then sets up the reg based on the object's properties: address, pitch
3407  * and tiling format.
3408  *
3409  * For an untiled surface, this removes any existing fence.
3410  */
3411 int
3412 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3413 {
3414         struct drm_device *dev = obj->base.dev;
3415         struct drm_i915_private *dev_priv = dev->dev_private;
3416         bool enable = obj->tiling_mode != I915_TILING_NONE;
3417         struct drm_i915_fence_reg *reg;
3418         int ret;
3419
3420         /* Have we updated the tiling parameters upon the object and so
3421          * will need to serialise the write to the associated fence register?
3422          */
3423         if (obj->fence_dirty) {
3424                 ret = i915_gem_object_wait_fence(obj);
3425                 if (ret)
3426                         return ret;
3427         }
3428
3429         /* Just update our place in the LRU if our fence is getting reused. */
3430         if (obj->fence_reg != I915_FENCE_REG_NONE) {
3431                 reg = &dev_priv->fence_regs[obj->fence_reg];
3432                 if (!obj->fence_dirty) {
3433                         list_move_tail(&reg->lru_list,
3434                                        &dev_priv->mm.fence_list);
3435                         return 0;
3436                 }
3437         } else if (enable) {
3438                 if (WARN_ON(!obj->map_and_fenceable))
3439                         return -EINVAL;
3440
3441                 reg = i915_find_fence_reg(dev);
3442                 if (IS_ERR(reg))
3443                         return PTR_ERR(reg);
3444
3445                 if (reg->obj) {
3446                         struct drm_i915_gem_object *old = reg->obj;
3447
3448                         ret = i915_gem_object_wait_fence(old);
3449                         if (ret)
3450                                 return ret;
3451
3452                         i915_gem_object_fence_lost(old);
3453                 }
3454         } else
3455                 return 0;
3456
3457         i915_gem_object_update_fence(obj, reg, enable);
3458
3459         return 0;
3460 }
3461
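/*
 * Example: a sketch of the usual "pin, fence, use, release" pattern for a
 * tiled object that is about to be accessed through the mappable GTT.
 * Error unwinding is abbreviated and struct_mutex is assumed to be held;
 * illustration only.
 *
 *   static int use_fenced_gtt_mapping(struct drm_i915_gem_object *obj)
 *   {
 *           int ret;
 *
 *           ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_MAPPABLE);
 *           if (ret)
 *                   return ret;
 *
 *           ret = i915_gem_object_get_fence(obj);
 *           if (ret)
 *                   goto unpin;
 *
 *           if (i915_gem_object_pin_fence(obj)) {
 *                   // ... access the object through the aperture, with the
 *                   // fence providing detiling ...
 *                   i915_gem_object_unpin_fence(obj);
 *           }
 *
 *   unpin:
 *           i915_gem_object_ggtt_unpin(obj);
 *           return ret;
 *   }
 */
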
3462 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
3463                                      unsigned long cache_level)
3464 {
3465         struct drm_mm_node *gtt_space = &vma->node;
3466         struct drm_mm_node *other;
3467
3468         /*
3469          * On some machines we have to be careful when putting differing types
3470          * of snoopable memory together to avoid the prefetcher crossing memory
3471          * domains and dying. During vm initialisation, we decide whether or not
3472          * these constraints apply and set the drm_mm.color_adjust
3473          * appropriately.
3474          */
3475         if (vma->vm->mm.color_adjust == NULL)
3476                 return true;
3477
3478         if (!drm_mm_node_allocated(gtt_space))
3479                 return true;
3480
3481         if (list_empty(&gtt_space->node_list))
3482                 return true;
3483
3484         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3485         if (other->allocated && !other->hole_follows && other->color != cache_level)
3486                 return false;
3487
3488         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3489         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3490                 return false;
3491
3492         return true;
3493 }
3494
3495 /**
3496  * Finds free space in the GTT aperture and binds the object there.
3497  */
3498 static struct i915_vma *
3499 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3500                            struct i915_address_space *vm,
3501                            unsigned alignment,
3502                            uint64_t flags)
3503 {
3504         struct drm_device *dev = obj->base.dev;
3505         struct drm_i915_private *dev_priv = dev->dev_private;
3506         u32 size, fence_size, fence_alignment, unfenced_alignment;
3507         unsigned long start =
3508                 flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3509         unsigned long end =
3510                 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
3511         struct i915_vma *vma;
3512         int ret;
3513
3514         fence_size = i915_gem_get_gtt_size(dev,
3515                                            obj->base.size,
3516                                            obj->tiling_mode);
3517         fence_alignment = i915_gem_get_gtt_alignment(dev,
3518                                                      obj->base.size,
3519                                                      obj->tiling_mode, true);
3520         unfenced_alignment =
3521                 i915_gem_get_gtt_alignment(dev,
3522                                            obj->base.size,
3523                                            obj->tiling_mode, false);
3524
3525         if (alignment == 0)
3526                 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3527                                                 unfenced_alignment;
3528         if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3529                 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
3530                 return ERR_PTR(-EINVAL);
3531         }
3532
3533         size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3534
3535         /* If the object is bigger than the entire aperture, reject it early
3536          * before evicting everything in a vain attempt to find space.
3537          */
3538         if (obj->base.size > end) {
3539                 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
3540                           obj->base.size,
3541                           flags & PIN_MAPPABLE ? "mappable" : "total",
3542                           end);
3543                 return ERR_PTR(-E2BIG);
3544         }
3545
3546         ret = i915_gem_object_get_pages(obj);
3547         if (ret)
3548                 return ERR_PTR(ret);
3549
3550         i915_gem_object_pin_pages(obj);
3551
3552         vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3553         if (IS_ERR(vma))
3554                 goto err_unpin;
3555
3556 search_free:
3557         ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3558                                                   size, alignment,
3559                                                   obj->cache_level,
3560                                                   start, end,
3561                                                   DRM_MM_SEARCH_DEFAULT,
3562                                                   DRM_MM_CREATE_DEFAULT);
3563         if (ret) {
3564                 ret = i915_gem_evict_something(dev, vm, size, alignment,
3565                                                obj->cache_level,
3566                                                start, end,
3567                                                flags);
3568                 if (ret == 0)
3569                         goto search_free;
3570
3571                 goto err_free_vma;
3572         }
3573         if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
3574                 ret = -EINVAL;
3575                 goto err_remove_node;
3576         }
3577
3578         ret = i915_gem_gtt_prepare_object(obj);
3579         if (ret)
3580                 goto err_remove_node;
3581
3582         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3583         list_add_tail(&vma->mm_list, &vm->inactive_list);
3584
3585         trace_i915_vma_bind(vma, flags);
3586         vma->bind_vma(vma, obj->cache_level,
3587                       flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
3588
3589         return vma;
3590
3591 err_remove_node:
3592         drm_mm_remove_node(&vma->node);
3593 err_free_vma:
3594         i915_gem_vma_destroy(vma);
3595         vma = ERR_PTR(ret);
3596 err_unpin:
3597         i915_gem_object_unpin_pages(obj);
3598         return vma;
3599 }
3600
3601 bool
3602 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3603                         bool force)
3604 {
3605         /* If we don't have a page list set up, then we're not pinned
3606          * to the GPU, and we can ignore the cache flush because it'll happen
3607          * again at bind time.
3608          */
3609         if (obj->pages == NULL)
3610                 return false;
3611
3612         /*
3613          * Stolen memory is always coherent with the GPU as it is explicitly
3614          * marked as wc by the system, or the system is cache-coherent.
3615          */
3616         if (obj->stolen || obj->phys_handle)
3617                 return false;
3618
3619         /* If the GPU is snooping the contents of the CPU cache,
3620          * we do not need to manually clear the CPU cache lines.  However,
3621          * the caches are only snooped when the render cache is
3622          * flushed/invalidated.  As we always have to emit invalidations
3623          * and flushes when moving into and out of the RENDER domain, correct
3624          * snooping behaviour occurs naturally as the result of our domain
3625          * tracking.
3626          */
3627         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3628                 return false;
3629
3630         trace_i915_gem_object_clflush(obj);
3631         drm_clflush_sg(obj->pages);
3632
3633         return true;
3634 }
3635
3636 /** Flushes the GTT write domain for the object if it's dirty. */
3637 static void
3638 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3639 {
3640         uint32_t old_write_domain;
3641
3642         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3643                 return;
3644
3645         /* No actual flushing is required for the GTT write domain.  Writes
3646          * to it immediately go to main memory as far as we know, so there's
3647          * no chipset flush.  It also doesn't land in render cache.
3648          *
3649          * However, we do have to enforce the order so that all writes through
3650          * the GTT land before any writes to the device, such as updates to
3651          * the GATT itself.
3652          */
3653         wmb();
3654
3655         old_write_domain = obj->base.write_domain;
3656         obj->base.write_domain = 0;
3657
3658         intel_fb_obj_flush(obj, false);
3659
3660         trace_i915_gem_object_change_domain(obj,
3661                                             obj->base.read_domains,
3662                                             old_write_domain);
3663 }
3664
3665 /** Flushes the CPU write domain for the object if it's dirty. */
3666 static void
3667 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3668                                        bool force)
3669 {
3670         uint32_t old_write_domain;
3671
3672         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3673                 return;
3674
3675         if (i915_gem_clflush_object(obj, force))
3676                 i915_gem_chipset_flush(obj->base.dev);
3677
3678         old_write_domain = obj->base.write_domain;
3679         obj->base.write_domain = 0;
3680
3681         intel_fb_obj_flush(obj, false);
3682
3683         trace_i915_gem_object_change_domain(obj,
3684                                             obj->base.read_domains,
3685                                             old_write_domain);
3686 }
3687
3688 /**
3689  * Moves a single object to the GTT read, and possibly write, domain.
3690  *
3691  * This function returns when the move is complete, including waiting on
3692  * flushes to occur.
3693  */
3694 int
3695 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3696 {
3697         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3698         struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3699         uint32_t old_write_domain, old_read_domains;
3700         int ret;
3701
3702         /* Not valid to be called on unbound objects. */
3703         if (vma == NULL)
3704                 return -EINVAL;
3705
3706         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3707                 return 0;
3708
3709         ret = i915_gem_object_wait_rendering(obj, !write);
3710         if (ret)
3711                 return ret;
3712
3713         i915_gem_object_retire(obj);
3714         i915_gem_object_flush_cpu_write_domain(obj, false);
3715
3716         /* Serialise direct access to this object with the barriers for
3717          * coherent writes from the GPU, by effectively invalidating the
3718          * GTT domain upon first access.
3719          */
3720         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3721                 mb();
3722
3723         old_write_domain = obj->base.write_domain;
3724         old_read_domains = obj->base.read_domains;
3725
3726         /* It should now be out of any other write domains, and we can update
3727          * the domain values for our changes.
3728          */
3729         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3730         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3731         if (write) {
3732                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3733                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3734                 obj->dirty = 1;
3735         }
3736
3737         if (write)
3738                 intel_fb_obj_invalidate(obj, NULL);
3739
3740         trace_i915_gem_object_change_domain(obj,
3741                                             old_read_domains,
3742                                             old_write_domain);
3743
3744         /* And bump the LRU for this access */
3745         if (i915_gem_object_is_inactive(obj))
3746                 list_move_tail(&vma->mm_list,
3747                                &dev_priv->gtt.base.inactive_list);
3748
3749         return 0;
3750 }
3751
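/*
 * Example: a sketch of the fast aperture write path built on the helper
 * above, in the style of pwrite through the GTT. Error handling is trimmed
 * and struct_mutex is assumed to be held; illustration only.
 *
 *   static int write_through_gtt(struct drm_i915_gem_object *obj)
 *   {
 *           int ret;
 *
 *           ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
 *           if (ret)
 *                   return ret;
 *
 *           // Flushes any pending CPU writes and moves the object into the
 *           // GTT read/write domain so aperture writes are coherent.
 *           ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *           if (ret == 0) {
 *                   // ... copy the user data through the mappable aperture
 *                   // at i915_gem_obj_ggtt_offset(obj) ...
 *           }
 *
 *           i915_gem_object_ggtt_unpin(obj);
 *           return ret;
 *   }
 */
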
3752 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3753                                     enum i915_cache_level cache_level)
3754 {
3755         struct drm_device *dev = obj->base.dev;
3756         struct i915_vma *vma, *next;
3757         int ret;
3758
3759         if (obj->cache_level == cache_level)
3760                 return 0;
3761
3762         if (i915_gem_obj_is_pinned(obj)) {
3763                 DRM_DEBUG("can not change the cache level of pinned objects\n");
3764                 return -EBUSY;
3765         }
3766
3767         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
3768                 if (!i915_gem_valid_gtt_space(vma, cache_level)) {
3769                         ret = i915_vma_unbind(vma);
3770                         if (ret)
3771                                 return ret;
3772                 }
3773         }
3774
3775         if (i915_gem_obj_bound_any(obj)) {
3776                 ret = i915_gem_object_finish_gpu(obj);
3777                 if (ret)
3778                         return ret;
3779
3780                 i915_gem_object_finish_gtt(obj);
3781
3782                 /* Before SandyBridge, you could not use tiling or fence
3783                  * registers with snooped memory, so relinquish any fences
3784                  * currently pointing to our region in the aperture.
3785                  */
3786                 if (INTEL_INFO(dev)->gen < 6) {
3787                         ret = i915_gem_object_put_fence(obj);
3788                         if (ret)
3789                                 return ret;
3790                 }
3791
3792                 list_for_each_entry(vma, &obj->vma_list, vma_link)
3793                         if (drm_mm_node_allocated(&vma->node))
3794                                 vma->bind_vma(vma, cache_level,
3795                                                 vma->bound & GLOBAL_BIND);
3796         }
3797
3798         list_for_each_entry(vma, &obj->vma_list, vma_link)
3799                 vma->node.color = cache_level;
3800         obj->cache_level = cache_level;
3801
3802         if (cpu_write_needs_clflush(obj)) {
3803                 u32 old_read_domains, old_write_domain;
3804
3805                 /* If we're coming from LLC cached, then we haven't
3806                  * actually been tracking whether the data is in the
3807                  * CPU cache or not, since we only allow one bit set
3808                  * in obj->write_domain and have been skipping the clflushes.
3809                  * Just set it to the CPU cache for now.
3810                  */
3811                 i915_gem_object_retire(obj);
3812                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3813
3814                 old_read_domains = obj->base.read_domains;
3815                 old_write_domain = obj->base.write_domain;
3816
3817                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3818                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3819
3820                 trace_i915_gem_object_change_domain(obj,
3821                                                     old_read_domains,
3822                                                     old_write_domain);
3823         }
3824
3825         return 0;
3826 }
3827
3828 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3829                                struct drm_file *file)
3830 {
3831         struct drm_i915_gem_caching *args = data;
3832         struct drm_i915_gem_object *obj;
3833         int ret;
3834
3835         ret = i915_mutex_lock_interruptible(dev);
3836         if (ret)
3837                 return ret;
3838
3839         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3840         if (&obj->base == NULL) {
3841                 ret = -ENOENT;
3842                 goto unlock;
3843         }
3844
3845         switch (obj->cache_level) {
3846         case I915_CACHE_LLC:
3847         case I915_CACHE_L3_LLC:
3848                 args->caching = I915_CACHING_CACHED;
3849                 break;
3850
3851         case I915_CACHE_WT:
3852                 args->caching = I915_CACHING_DISPLAY;
3853                 break;
3854
3855         default:
3856                 args->caching = I915_CACHING_NONE;
3857                 break;
3858         }
3859
3860         drm_gem_object_unreference(&obj->base);
3861 unlock:
3862         mutex_unlock(&dev->struct_mutex);
3863         return ret;
3864 }
3865
3866 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3867                                struct drm_file *file)
3868 {
3869         struct drm_i915_gem_caching *args = data;
3870         struct drm_i915_gem_object *obj;
3871         enum i915_cache_level level;
3872         int ret;
3873
3874         switch (args->caching) {
3875         case I915_CACHING_NONE:
3876                 level = I915_CACHE_NONE;
3877                 break;
3878         case I915_CACHING_CACHED:
3879                 level = I915_CACHE_LLC;
3880                 break;
3881         case I915_CACHING_DISPLAY:
3882                 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3883                 break;
3884         default:
3885                 return -EINVAL;
3886         }
3887
3888         ret = i915_mutex_lock_interruptible(dev);
3889         if (ret)
3890                 return ret;
3891
3892         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3893         if (&obj->base == NULL) {
3894                 ret = -ENOENT;
3895                 goto unlock;
3896         }
3897
3898         ret = i915_gem_object_set_cache_level(obj, level);
3899
3900         drm_gem_object_unreference(&obj->base);
3901 unlock:
3902         mutex_unlock(&dev->struct_mutex);
3903         return ret;
3904 }
3905
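/*
 * Example: a minimal userspace sketch of the set-caching ioctl above,
 * assuming an open DRM fd and a valid handle. The helper name is
 * hypothetical; illustration only.
 *
 *   #include <stdint.h>
 *   #include <xf86drm.h>
 *   #include <i915_drm.h>
 *
 *   static int set_bo_cached(int fd, uint32_t handle)
 *   {
 *           struct drm_i915_gem_caching arg = {
 *                   .handle  = handle,
 *                   .caching = I915_CACHING_CACHED, // maps to I915_CACHE_LLC
 *           };
 *
 *           // Fails with EBUSY if the object is currently pinned and with
 *           // EINVAL for an unknown caching mode.
 *           return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *   }
 */
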
3906 static bool is_pin_display(struct drm_i915_gem_object *obj)
3907 {
3908         struct i915_vma *vma;
3909
3910         vma = i915_gem_obj_to_ggtt(obj);
3911         if (!vma)
3912                 return false;
3913
3914         /* There are 2 sources that pin objects:
3915          *   1. The display engine (scanouts, sprites, cursors);
3916          *   2. Reservations for execbuffer;
3917          *
3918          * We can ignore reservations as we hold the struct_mutex and
3919          * are only called outside of the reservation path.
3920          */
3921         return vma->pin_count;
3922 }
3923
3924 /*
3925  * Prepare buffer for display plane (scanout, cursors, etc).
3926  * Can be called from an uninterruptible phase (modesetting) and allows
3927  * any flushes to be pipelined (for pageflips).
3928  */
3929 int
3930 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3931                                      u32 alignment,
3932                                      struct intel_engine_cs *pipelined)
3933 {
3934         u32 old_read_domains, old_write_domain;
3935         bool was_pin_display;
3936         int ret;
3937
3938         if (pipelined != obj->ring) {
3939                 ret = i915_gem_object_sync(obj, pipelined);
3940                 if (ret)
3941                         return ret;
3942         }
3943
3944         /* Mark the pin_display early so that we account for the
3945          * display coherency whilst setting up the cache domains.
3946          */
3947         was_pin_display = obj->pin_display;
3948         obj->pin_display = true;
3949
3950         /* The display engine is not coherent with the LLC cache on gen6.  As
3951          * a result, we make sure that the pinning that is about to occur is
3952          * done with uncached PTEs. This is lowest common denominator for all
3953          * chipsets.
3954          *
3955          * However for gen6+, we could do better by using the GFDT bit instead
3956          * of uncaching, which would allow us to flush all the LLC-cached data
3957          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3958          */
3959         ret = i915_gem_object_set_cache_level(obj,
3960                                               HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3961         if (ret)
3962                 goto err_unpin_display;
3963
3964         /* As the user may map the buffer once pinned in the display plane
3965          * (e.g. libkms for the bootup splash), we have to ensure that we
3966          * always use map_and_fenceable for all scanout buffers.
3967          */
3968         ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
3969         if (ret)
3970                 goto err_unpin_display;
3971
3972         i915_gem_object_flush_cpu_write_domain(obj, true);
3973
3974         old_write_domain = obj->base.write_domain;
3975         old_read_domains = obj->base.read_domains;
3976
3977         /* It should now be out of any other write domains, and we can update
3978          * the domain values for our changes.
3979          */
3980         obj->base.write_domain = 0;
3981         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3982
3983         trace_i915_gem_object_change_domain(obj,
3984                                             old_read_domains,
3985                                             old_write_domain);
3986
3987         return 0;
3988
3989 err_unpin_display:
3990         WARN_ON(was_pin_display != is_pin_display(obj));
3991         obj->pin_display = was_pin_display;
3992         return ret;
3993 }
3994
3995 void
3996 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3997 {
3998         i915_gem_object_ggtt_unpin(obj);
3999         obj->pin_display = is_pin_display(obj);
4000 }
4001
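/*
 * Example: a sketch of how a scanout buffer is typically prepared and later
 * released around the two helpers above, in the style of the modesetting
 * code. Fencing and error unwinding are simplified; illustration only.
 *
 *   static int prepare_scanout(struct drm_i915_gem_object *obj,
 *                              u32 alignment,
 *                              struct intel_engine_cs *pipelined)
 *   {
 *           int ret;
 *
 *           // Pins into the mappable GGTT with display-coherent PTEs and
 *           // flushes any dirty CPU cachelines.
 *           ret = i915_gem_object_pin_to_display_plane(obj, alignment,
 *                                                      pipelined);
 *           if (ret)
 *                   return ret;
 *
 *           // Tiled scanout buffers additionally want a fence register.
 *           ret = i915_gem_object_get_fence(obj);
 *           if (ret)
 *                   i915_gem_object_unpin_from_display_plane(obj);
 *
 *           return ret;
 *   }
 */
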
4002 int
4003 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
4004 {
4005         int ret;
4006
4007         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
4008                 return 0;
4009
4010         ret = i915_gem_object_wait_rendering(obj, false);
4011         if (ret)
4012                 return ret;
4013
4014         /* Ensure that we invalidate the GPU's caches and TLBs. */
4015         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
4016         return 0;
4017 }
4018
4019 /**
4020  * Moves a single object to the CPU read, and possibly write, domain.
4021  *
4022  * This function returns when the move is complete, including waiting on
4023  * flushes to occur.
4024  */
4025 int
4026 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
4027 {
4028         uint32_t old_write_domain, old_read_domains;
4029         int ret;
4030
4031         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
4032                 return 0;
4033
4034         ret = i915_gem_object_wait_rendering(obj, !write);
4035         if (ret)
4036                 return ret;
4037
4038         i915_gem_object_retire(obj);
4039         i915_gem_object_flush_gtt_write_domain(obj);
4040
4041         old_write_domain = obj->base.write_domain;
4042         old_read_domains = obj->base.read_domains;
4043
4044         /* Flush the CPU cache if it's still invalid. */
4045         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
4046                 i915_gem_clflush_object(obj, false);
4047
4048                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
4049         }
4050
4051         /* It should now be out of any other write domains, and we can update
4052          * the domain values for our changes.
4053          */
4054         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
4055
4056         /* If we're writing through the CPU, then the GPU read domains will
4057          * need to be invalidated at next use.
4058          */
4059         if (write) {
4060                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4061                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4062         }
4063
4064         if (write)
4065                 intel_fb_obj_invalidate(obj, NULL);
4066
4067         trace_i915_gem_object_change_domain(obj,
4068                                             old_read_domains,
4069                                             old_write_domain);
4070
4071         return 0;
4072 }
4073
4074 /* Throttle our rendering by waiting until the ring has completed our requests
4075  * emitted over 20 msec ago.
4076  *
4077  * Note that if we were to use the current jiffies each time around the loop,
4078  * we wouldn't escape the function with any frames outstanding if the time to
4079  * render a frame was over 20ms.
4080  *
4081  * This should get us reasonable parallelism between CPU and GPU but also
4082  * relatively low latency when blocking on a particular request to finish.
4083  */
4084 static int
4085 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4086 {
4087         struct drm_i915_private *dev_priv = dev->dev_private;
4088         struct drm_i915_file_private *file_priv = file->driver_priv;
4089         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
4090         struct drm_i915_gem_request *request;
4091         struct intel_engine_cs *ring = NULL;
4092         unsigned reset_counter;
4093         u32 seqno = 0;
4094         int ret;
4095
4096         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
4097         if (ret)
4098                 return ret;
4099
4100         ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
4101         if (ret)
4102                 return ret;
4103
4104         spin_lock(&file_priv->mm.lock);
4105         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
4106                 if (time_after_eq(request->emitted_jiffies, recent_enough))
4107                         break;
4108
4109                 ring = request->ring;
4110                 seqno = request->seqno;
4111         }
4112         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
4113         spin_unlock(&file_priv->mm.lock);
4114
4115         if (seqno == 0)
4116                 return 0;
4117
4118         ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
4119         if (ret == 0)
4120                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
4121
4122         return ret;
4123 }
4124
4125 static bool
4126 i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
4127 {
4128         struct drm_i915_gem_object *obj = vma->obj;
4129
4130         if (alignment &&
4131             vma->node.start & (alignment - 1))
4132                 return true;
4133
4134         if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
4135                 return true;
4136
4137         if (flags & PIN_OFFSET_BIAS &&
4138             vma->node.start < (flags & PIN_OFFSET_MASK))
4139                 return true;
4140
4141         return false;
4142 }
4143
4144 int
4145 i915_gem_object_pin(struct drm_i915_gem_object *obj,
4146                     struct i915_address_space *vm,
4147                     uint32_t alignment,
4148                     uint64_t flags)
4149 {
4150         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4151         struct i915_vma *vma;
4152         unsigned bound;
4153         int ret;
4154
4155         if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
4156                 return -ENODEV;
4157
4158         if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
4159                 return -EINVAL;
4160
4161         if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
4162                 return -EINVAL;
4163
4164         vma = i915_gem_obj_to_vma(obj, vm);
4165         if (vma) {
4166                 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
4167                         return -EBUSY;
4168
4169                 if (i915_vma_misplaced(vma, alignment, flags)) {
4170                         WARN(vma->pin_count,
4171                              "bo is already pinned with incorrect alignment:"
4172                              " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
4173                              " obj->map_and_fenceable=%d\n",
4174                              i915_gem_obj_offset(obj, vm), alignment,
4175                              !!(flags & PIN_MAPPABLE),
4176                              obj->map_and_fenceable);
4177                         ret = i915_vma_unbind(vma);
4178                         if (ret)
4179                                 return ret;
4180
4181                         vma = NULL;
4182                 }
4183         }
4184
4185         bound = vma ? vma->bound : 0;
4186         if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
4187                 vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
4188                 if (IS_ERR(vma))
4189                         return PTR_ERR(vma);
4190         }
4191
4192         if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
4193                 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
4194
4195         if ((bound ^ vma->bound) & GLOBAL_BIND) {
4196                 bool mappable, fenceable;
4197                 u32 fence_size, fence_alignment;
4198
4199                 fence_size = i915_gem_get_gtt_size(obj->base.dev,
4200                                                    obj->base.size,
4201                                                    obj->tiling_mode);
4202                 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
4203                                                              obj->base.size,
4204                                                              obj->tiling_mode,
4205                                                              true);
4206
4207                 fenceable = (vma->node.size == fence_size &&
4208                              (vma->node.start & (fence_alignment - 1)) == 0);
4209
4210                 mappable = (vma->node.start + obj->base.size <=
4211                             dev_priv->gtt.mappable_end);
4212
4213                 obj->map_and_fenceable = mappable && fenceable;
4214         }
4215
4216         WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
4217
4218         vma->pin_count++;
4219         if (flags & PIN_MAPPABLE)
4220                 obj->pin_mappable |= true;
4221
4222         return 0;
4223 }
4224
4225 void
4226 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
4227 {
4228         struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
4229
4230         BUG_ON(!vma);
4231         BUG_ON(vma->pin_count == 0);
4232         BUG_ON(!i915_gem_obj_ggtt_bound(obj));
4233
4234         if (--vma->pin_count == 0)
4235                 obj->pin_mappable = false;
4236 }
4237
4238 bool
4239 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
4240 {
4241         if (obj->fence_reg != I915_FENCE_REG_NONE) {
4242                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4243                 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
4244
4245                 WARN_ON(!ggtt_vma ||
4246                         dev_priv->fence_regs[obj->fence_reg].pin_count >
4247                         ggtt_vma->pin_count);
4248                 dev_priv->fence_regs[obj->fence_reg].pin_count++;
4249                 return true;
4250         } else
4251                 return false;
4252 }
4253
4254 void
4255 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
4256 {
4257         if (obj->fence_reg != I915_FENCE_REG_NONE) {
4258                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4259                 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
4260                 dev_priv->fence_regs[obj->fence_reg].pin_count--;
4261         }
4262 }
4263
4264 int
4265 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4266                     struct drm_file *file)
4267 {
4268         struct drm_i915_gem_busy *args = data;
4269         struct drm_i915_gem_object *obj;
4270         int ret;
4271
4272         ret = i915_mutex_lock_interruptible(dev);
4273         if (ret)
4274                 return ret;
4275
4276         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4277         if (&obj->base == NULL) {
4278                 ret = -ENOENT;
4279                 goto unlock;
4280         }
4281
4282         /* Count all active objects as busy, even if they are currently not used
4283          * by the gpu. Users of this interface expect objects to eventually
4284          * become non-busy without any further actions, therefore emit any
4285          * necessary flushes here.
4286          */
4287         ret = i915_gem_object_flush_active(obj);
4288
4289         args->busy = obj->active;
4290         if (obj->ring) {
4291                 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4292                 args->busy |= intel_ring_flag(obj->ring) << 16;
4293         }
4294
4295         drm_gem_object_unreference(&obj->base);
4296 unlock:
4297         mutex_unlock(&dev->struct_mutex);
4298         return ret;
4299 }
4300
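/*
 * Example: a minimal userspace sketch of the busy ioctl above. Bit 0 of the
 * result reports whether the object is still active and the upper 16 bits
 * carry the flag of the ring it was last submitted to. Helper name
 * hypothetical; illustration only.
 *
 *   #include <stdbool.h>
 *   #include <stdint.h>
 *   #include <xf86drm.h>
 *   #include <i915_drm.h>
 *
 *   static bool bo_is_busy(int fd, uint32_t handle)
 *   {
 *           struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *           if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
 *                   return true; // be conservative on error
 *
 *           return busy.busy != 0;
 *   }
 */
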
4301 int
4302 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4303                         struct drm_file *file_priv)
4304 {
4305         return i915_gem_ring_throttle(dev, file_priv);
4306 }
4307
4308 int
4309 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4310                        struct drm_file *file_priv)
4311 {
4312         struct drm_i915_private *dev_priv = dev->dev_private;
4313         struct drm_i915_gem_madvise *args = data;
4314         struct drm_i915_gem_object *obj;
4315         int ret;
4316
4317         switch (args->madv) {
4318         case I915_MADV_DONTNEED:
4319         case I915_MADV_WILLNEED:
4320             break;
4321         default:
4322             return -EINVAL;
4323         }
4324
4325         ret = i915_mutex_lock_interruptible(dev);
4326         if (ret)
4327                 return ret;
4328
4329         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4330         if (&obj->base == NULL) {
4331                 ret = -ENOENT;
4332                 goto unlock;
4333         }
4334
4335         if (i915_gem_obj_is_pinned(obj)) {
4336                 ret = -EINVAL;
4337                 goto out;
4338         }
4339
4340         if (obj->pages &&
4341             obj->tiling_mode != I915_TILING_NONE &&
4342             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4343                 if (obj->madv == I915_MADV_WILLNEED)
4344                         i915_gem_object_unpin_pages(obj);
4345                 if (args->madv == I915_MADV_WILLNEED)
4346                         i915_gem_object_pin_pages(obj);
4347         }
4348
4349         if (obj->madv != __I915_MADV_PURGED)
4350                 obj->madv = args->madv;
4351
4352         /* if the object is no longer attached, discard its backing storage */
4353         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
4354                 i915_gem_object_truncate(obj);
4355
4356         args->retained = obj->madv != __I915_MADV_PURGED;
4357
4358 out:
4359         drm_gem_object_unreference(&obj->base);
4360 unlock:
4361         mutex_unlock(&dev->struct_mutex);
4362         return ret;
4363 }
4364
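/*
 * Example: a minimal userspace sketch of the madvise ioctl above, used to
 * mark an idle buffer's backing storage purgeable and, later, to learn
 * whether the pages survived. Helper name hypothetical; illustration only.
 *
 *   #include <stdbool.h>
 *   #include <stdint.h>
 *   #include <xf86drm.h>
 *   #include <i915_drm.h>
 *
 *   static bool bo_madvise(int fd, uint32_t handle, uint32_t madv)
 *   {
 *           struct drm_i915_gem_madvise arg = {
 *                   .handle = handle,
 *                   .madv   = madv, // I915_MADV_WILLNEED or _DONTNEED
 *           };
 *
 *           if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg))
 *                   return false;
 *
 *           // false once the backing storage has already been purged.
 *           return arg.retained != 0;
 *   }
 */
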
4365 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4366                           const struct drm_i915_gem_object_ops *ops)
4367 {
4368         INIT_LIST_HEAD(&obj->global_list);
4369         INIT_LIST_HEAD(&obj->ring_list);
4370         INIT_LIST_HEAD(&obj->obj_exec_link);
4371         INIT_LIST_HEAD(&obj->vma_list);
4372
4373         obj->ops = ops;
4374
4375         obj->fence_reg = I915_FENCE_REG_NONE;
4376         obj->madv = I915_MADV_WILLNEED;
4377
4378         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4379 }
4380
4381 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4382         .get_pages = i915_gem_object_get_pages_gtt,
4383         .put_pages = i915_gem_object_put_pages_gtt,
4384 };
4385
4386 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4387                                                   size_t size)
4388 {
4389         struct drm_i915_gem_object *obj;
4390         struct address_space *mapping;
4391         gfp_t mask;
4392
4393         obj = i915_gem_object_alloc(dev);
4394         if (obj == NULL)
4395                 return NULL;
4396
4397         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4398                 i915_gem_object_free(obj);
4399                 return NULL;
4400         }
4401
4402         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4403         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4404                 /* 965gm cannot relocate objects above 4GiB. */
4405                 mask &= ~__GFP_HIGHMEM;
4406                 mask |= __GFP_DMA32;
4407         }
4408
4409         mapping = file_inode(obj->base.filp)->i_mapping;
4410         mapping_set_gfp_mask(mapping, mask);
4411
4412         i915_gem_object_init(obj, &i915_gem_object_ops);
4413
4414         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4415         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4416
4417         if (HAS_LLC(dev)) {
4418                 /* On some devices, we can have the GPU use the LLC (the CPU
4419                  * cache) for about a 10% performance improvement
4420                  * compared to uncached.  Graphics requests other than
4421                  * display scanout are coherent with the CPU in
4422                  * accessing this cache.  This means in this mode we
4423                  * don't need to clflush on the CPU side, and on the
4424                  * GPU side we only need to flush internal caches to
4425                  * get data visible to the CPU.
4426                  *
4427                  * However, we maintain the display planes as UC, and so
4428                  * need to rebind when first used as such.
4429                  */
4430                 obj->cache_level = I915_CACHE_LLC;
4431         } else
4432                 obj->cache_level = I915_CACHE_NONE;
4433
4434         trace_i915_gem_object_create(obj);
4435
4436         return obj;
4437 }
4438
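/*
 * Example: a sketch of how driver-internal code typically allocates a GEM
 * object and pins it into the global GTT, in the style of the ringbuffer
 * and context allocation paths. Error handling is abbreviated and
 * struct_mutex is assumed to be held; illustration only.
 *
 *   static struct drm_i915_gem_object *alloc_and_pin(struct drm_device *dev,
 *                                                    size_t size)
 *   {
 *           struct drm_i915_gem_object *obj;
 *
 *           obj = i915_gem_alloc_object(dev, size);
 *           if (obj == NULL)
 *                   return NULL;
 *
 *           if (i915_gem_obj_ggtt_pin(obj, 4096, 0)) {
 *                   drm_gem_object_unreference(&obj->base);
 *                   return NULL;
 *           }
 *
 *           return obj;
 *   }
 */
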
4439 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4440 {
4441         /* If we are the last user of the backing storage (be it shmemfs
4442          * pages or stolen etc), we know that the pages are going to be
4443          * immediately released. In this case, we can then skip copying
4444          * back the contents from the GPU.
4445          */
4446
4447         if (obj->madv != I915_MADV_WILLNEED)
4448                 return false;
4449
4450         if (obj->base.filp == NULL)
4451                 return true;
4452
4453         /* At first glance, this looks racy, but then again so would be
4454          * userspace racing mmap against close. However, the first external
4455          * reference to the filp can only be obtained through the
4456          * i915_gem_mmap_ioctl() which safeguards us against the user
4457          * acquiring such a reference whilst we are in the middle of
4458          * freeing the object.
4459          */
4460         return atomic_long_read(&obj->base.filp->f_count) == 1;
4461 }
4462
4463 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4464 {
4465         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4466         struct drm_device *dev = obj->base.dev;
4467         struct drm_i915_private *dev_priv = dev->dev_private;
4468         struct i915_vma *vma, *next;
4469
4470         intel_runtime_pm_get(dev_priv);
4471
4472         trace_i915_gem_object_destroy(obj);
4473
4474         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4475                 int ret;
4476
4477                 vma->pin_count = 0;
4478                 ret = i915_vma_unbind(vma);
4479                 if (WARN_ON(ret == -ERESTARTSYS)) {
4480                         bool was_interruptible;
4481
4482                         was_interruptible = dev_priv->mm.interruptible;
4483                         dev_priv->mm.interruptible = false;
4484
4485                         WARN_ON(i915_vma_unbind(vma));
4486
4487                         dev_priv->mm.interruptible = was_interruptible;
4488                 }
4489         }
4490
4491         /* Stolen objects don't hold a ref, but do hold a pin count. Fix that
4492          * up before progressing. */
4493         if (obj->stolen)
4494                 i915_gem_object_unpin_pages(obj);
4495
4496         WARN_ON(obj->frontbuffer_bits);
4497
4498         if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4499             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4500             obj->tiling_mode != I915_TILING_NONE)
4501                 i915_gem_object_unpin_pages(obj);
4502
4503         if (WARN_ON(obj->pages_pin_count))
4504                 obj->pages_pin_count = 0;
4505         if (discard_backing_storage(obj))
4506                 obj->madv = I915_MADV_DONTNEED;
4507         i915_gem_object_put_pages(obj);
4508         i915_gem_object_free_mmap_offset(obj);
4509
4510         BUG_ON(obj->pages);
4511
4512         if (obj->base.import_attach)
4513                 drm_prime_gem_destroy(&obj->base, NULL);
4514
4515         if (obj->ops->release)
4516                 obj->ops->release(obj);
4517
4518         drm_gem_object_release(&obj->base);
4519         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4520
4521         kfree(obj->bit_17);
4522         i915_gem_object_free(obj);
4523
4524         intel_runtime_pm_put(dev_priv);
4525 }
4526
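/*
 * i915_gem_obj_to_vma - find the VMA binding @obj into address space @vm
 *
 * Returns the matching VMA from obj->vma_list, or NULL if the object has
 * no VMA for @vm.  A hypothetical caller might look up a GGTT offset like
 * this (illustrative sketch only, not code from this file):
 *
 *	vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base);
 *	if (vma && drm_mm_node_allocated(&vma->node))
 *		offset = vma->node.start;
 */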
4527 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4528                                      struct i915_address_space *vm)
4529 {
4530         struct i915_vma *vma;
4531         list_for_each_entry(vma, &obj->vma_list, vma_link)
4532                 if (vma->vm == vm)
4533                         return vma;
4534
4535         return NULL;
4536 }
4537
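/*
 * i915_gem_vma_destroy - free a VMA that is no longer bound
 *
 * The VMA must already be unbound (its drm_mm node freed).  VMAs still
 * sitting on an execbuffer reservation list are kept as placeholders and
 * freed later; otherwise the PPGTT reference is dropped and the VMA is
 * unlinked from the object and freed.
 */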
4538 void i915_gem_vma_destroy(struct i915_vma *vma)
4539 {
4540         struct i915_address_space *vm = NULL;
4541         WARN_ON(vma->node.allocated);
4542
4543         /* Keep the vma as a placeholder in the execbuffer reservation lists */
4544         if (!list_empty(&vma->exec_list))
4545                 return;
4546
4547         vm = vma->vm;
4548
4549         if (!i915_is_ggtt(vm))
4550                 i915_ppgtt_put(i915_vm_to_ppgtt(vm));
4551
4552         list_del(&vma->vma_link);
4553
4554         kfree(vma);
4555 }
4556
4557 static void
4558 i915_gem_stop_ringbuffers(struct drm_device *dev)
4559 {
4560         struct drm_i915_private *dev_priv = dev->dev_private;
4561         struct intel_engine_cs *ring;
4562         int i;
4563
4564         for_each_ring(ring, dev_priv, i)
4565                 dev_priv->gt.stop_ring(ring);
4566 }
4567
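/*
 * i915_gem_suspend - quiesce the GPU before system suspend
 *
 * Waits for the GPU to go idle, retires outstanding requests, evicts
 * everything when running under UMS, stops the rings, and then shuts down
 * the hangcheck timer and the retire/idle workers so nothing touches the
 * hardware afterwards.
 */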
4568 int
4569 i915_gem_suspend(struct drm_device *dev)
4570 {
4571         struct drm_i915_private *dev_priv = dev->dev_private;
4572         int ret = 0;
4573
4574         mutex_lock(&dev->struct_mutex);
4575         ret = i915_gpu_idle(dev);
4576         if (ret)
4577                 goto err;
4578
4579         i915_gem_retire_requests(dev);
4580
4581         /* Under UMS, be paranoid and evict. */
4582         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4583                 i915_gem_evict_everything(dev);
4584
4585         i915_gem_stop_ringbuffers(dev);
4586         mutex_unlock(&dev->struct_mutex);
4587
4588         del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4589         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4590         flush_delayed_work(&dev_priv->mm.idle_work);
4591
4592         /* Assert that we successfully flushed all the work and
4593          * reset the GPU back to its idle, low power state.
4594          */
4595         WARN_ON(dev_priv->mm.busy);
4596
4597         return 0;
4598
4599 err:
4600         mutex_unlock(&dev->struct_mutex);
4601         return ret;
4602 }
4603
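/*
 * i915_gem_l3_remap - replay the saved L3 remapping registers for @slice
 *
 * Emits one MI_LOAD_REGISTER_IMM per register in the GEN7_L3LOG range,
 * using the remap_info snapshot kept in dev_priv.  A no-op on hardware
 * without L3 DPF or when no remapping information has been recorded.
 */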
4604 int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
4605 {
4606         struct drm_device *dev = ring->dev;
4607         struct drm_i915_private *dev_priv = dev->dev_private;
4608         u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4609         u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4610         int i, ret;
4611
4612         if (!HAS_L3_DPF(dev) || !remap_info)
4613                 return 0;
4614
4615         ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4616         if (ret)
4617                 return ret;
4618
4619         /*
4620          * Note: We do not worry about the concurrent register cacheline hang
4621          * here because no other code should access these registers other than
4622          * at initialization time.
4623          */
4624         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4625                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4626                 intel_ring_emit(ring, reg_base + i);
4627                 intel_ring_emit(ring, remap_info[i/4]);
4628         }
4629
4630         intel_ring_advance(ring);
4631
4632         return ret;
4633 }
4634
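/*
 * i915_gem_init_swizzling - enable tiling swizzle handling in hardware
 *
 * Nothing to do before gen5 or when bit-6 swizzling is unused; otherwise
 * the display and tiling arbiter swizzle controls are enabled, with the
 * generation-specific ARB_MODE (GAMTARBMODE on gen8) bit set on gen6+.
 */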
4635 void i915_gem_init_swizzling(struct drm_device *dev)
4636 {
4637         struct drm_i915_private *dev_priv = dev->dev_private;
4638
4639         if (INTEL_INFO(dev)->gen < 5 ||
4640             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4641                 return;
4642
4643         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4644                                  DISP_TILE_SURFACE_SWIZZLING);
4645
4646         if (IS_GEN5(dev))
4647                 return;
4648
4649         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4650         if (IS_GEN6(dev))
4651                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4652         else if (IS_GEN7(dev))
4653                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4654         else if (IS_GEN8(dev))
4655                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4656         else
4657                 BUG();
4658 }
4659
4660 static bool
4661 intel_enable_blt(struct drm_device *dev)
4662 {
4663         if (!HAS_BLT(dev))
4664                 return false;
4665
4666         /* The blitter was dysfunctional on early prototypes */
4667         if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4668                 DRM_INFO("BLT not supported on this pre-production hardware;"
4669                          " graphics performance will be degraded.\n");
4670                 return false;
4671         }
4672
4673         return true;
4674 }
4675
4676 static void init_unused_ring(struct drm_device *dev, u32 base)
4677 {
4678         struct drm_i915_private *dev_priv = dev->dev_private;
4679
4680         I915_WRITE(RING_CTL(base), 0);
4681         I915_WRITE(RING_HEAD(base), 0);
4682         I915_WRITE(RING_TAIL(base), 0);
4683         I915_WRITE(RING_START(base), 0);
4684 }
4685
4686 static void init_unused_rings(struct drm_device *dev)
4687 {
4688         if (IS_I830(dev)) {
4689                 init_unused_ring(dev, PRB1_BASE);
4690                 init_unused_ring(dev, SRB0_BASE);
4691                 init_unused_ring(dev, SRB1_BASE);
4692                 init_unused_ring(dev, SRB2_BASE);
4693                 init_unused_ring(dev, SRB3_BASE);
4694         } else if (IS_GEN2(dev)) {
4695                 init_unused_ring(dev, SRB0_BASE);
4696                 init_unused_ring(dev, SRB1_BASE);
4697         } else if (IS_GEN3(dev)) {
4698                 init_unused_ring(dev, PRB1_BASE);
4699                 init_unused_ring(dev, PRB2_BASE);
4700         }
4701 }
4702
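/*
 * i915_gem_init_rings - bring up the legacy (non-execlists) rings
 *
 * Initializes the render ring and, where the hardware has them, the BSD,
 * blitter, vebox and second BSD rings, unwinding in reverse order if any
 * step fails.  Finishes by seeding the seqno near the top of its range,
 * presumably so that seqno wrap handling is exercised soon after boot.
 */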
4703 int i915_gem_init_rings(struct drm_device *dev)
4704 {
4705         struct drm_i915_private *dev_priv = dev->dev_private;
4706         int ret;
4707
4708         /*
4709          * At least 830 can leave some of the unused rings
4710          * "active" (ie. head != tail) after resume which
4711          * will prevent c3 entry. Makes sure all unused rings
4712          * are totally idle.
4713          */
4714         init_unused_rings(dev);
4715
4716         ret = intel_init_render_ring_buffer(dev);
4717         if (ret)
4718                 return ret;
4719
4720         if (HAS_BSD(dev)) {
4721                 ret = intel_init_bsd_ring_buffer(dev);
4722                 if (ret)
4723                         goto cleanup_render_ring;
4724         }
4725
4726         if (intel_enable_blt(dev)) {
4727                 ret = intel_init_blt_ring_buffer(dev);
4728                 if (ret)
4729                         goto cleanup_bsd_ring;
4730         }
4731
4732         if (HAS_VEBOX(dev)) {
4733                 ret = intel_init_vebox_ring_buffer(dev);
4734                 if (ret)
4735                         goto cleanup_blt_ring;
4736         }
4737
4738         if (HAS_BSD2(dev)) {
4739                 ret = intel_init_bsd2_ring_buffer(dev);
4740                 if (ret)
4741                         goto cleanup_vebox_ring;
4742         }
4743
4744         ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4745         if (ret)
4746                 goto cleanup_bsd2_ring;
4747
4748         return 0;
4749
4750 cleanup_bsd2_ring:
4751         intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
4752 cleanup_vebox_ring:
4753         intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4754 cleanup_blt_ring:
4755         intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4756 cleanup_bsd_ring:
4757         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4758 cleanup_render_ring:
4759         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4760
4761         return ret;
4762 }
4763
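/*
 * i915_gem_init_hw - GPU hardware initialization at load, reset and resume
 *
 * Performs assorted per-platform register setup (eLLC, HSW GT3 slice
 * status, PCH-NOP handshake overrides), programs swizzling, brings up the
 * rings through the gt.init_rings vfunc, replays the L3 remapping for every
 * slice, and finally enables contexts and PPGTT.  A -EIO from the
 * context/PPGTT steps is passed straight back (the caller treats it as a
 * wedged GPU); any other error tears the rings back down.
 */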
4764 int
4765 i915_gem_init_hw(struct drm_device *dev)
4766 {
4767         struct drm_i915_private *dev_priv = dev->dev_private;
4768         int ret, i;
4769
4770         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4771                 return -EIO;
4772
4773         if (dev_priv->ellc_size)
4774                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4775
4776         if (IS_HASWELL(dev))
4777                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4778                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4779
4780         if (HAS_PCH_NOP(dev)) {
4781                 if (IS_IVYBRIDGE(dev)) {
4782                         u32 temp = I915_READ(GEN7_MSG_CTL);
4783                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4784                         I915_WRITE(GEN7_MSG_CTL, temp);
4785                 } else if (INTEL_INFO(dev)->gen >= 7) {
4786                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4787                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4788                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4789                 }
4790         }
4791
4792         i915_gem_init_swizzling(dev);
4793
4794         ret = dev_priv->gt.init_rings(dev);
4795         if (ret)
4796                 return ret;
4797
4798         for (i = 0; i < NUM_L3_SLICES(dev); i++)
4799                 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4800
4801         /*
4802          * XXX: Contexts should only be initialized once. Doing a switch to the
4803          * default context, however, is something we'd like to do after
4804          * reset or thaw (the latter may not actually be necessary for HW, but
4805          * goes better with our code). Context switching requires rings (for
4806          * the do_switch), but must happen before enabling PPGTT. So don't move this.
4807          */
4808         ret = i915_gem_context_enable(dev_priv);
4809         if (ret && ret != -EIO) {
4810                 DRM_ERROR("Context enable failed %d\n", ret);
4811                 i915_gem_cleanup_ringbuffer(dev);
4812
4813                 return ret;
4814         }
4815
4816         ret = i915_ppgtt_init_hw(dev);
4817         if (ret && ret != -EIO) {
4818                 DRM_ERROR("PPGTT enable failed %d\n", ret);
4819                 i915_gem_cleanup_ringbuffer(dev);
4820         }
4821
4822         return ret;
4823 }
4824
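/*
 * i915_gem_init - top-level GEM initialization at driver load
 *
 * Chooses between the legacy ringbuffer and execlists submission backends,
 * initializes userptr support, the global GTT and contexts, and then brings
 * up the hardware via i915_gem_init_hw().  An -EIO from the hardware
 * bring-up marks the GPU as wedged rather than failing driver load.
 */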
4825 int i915_gem_init(struct drm_device *dev)
4826 {
4827         struct drm_i915_private *dev_priv = dev->dev_private;
4828         int ret;
4829
4830         i915.enable_execlists = intel_sanitize_enable_execlists(dev,
4831                         i915.enable_execlists);
4832
4833         mutex_lock(&dev->struct_mutex);
4834
4835         if (IS_VALLEYVIEW(dev)) {
4836                 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4837                 I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
4838                 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
4839                               VLV_GTLC_ALLOWWAKEACK), 10))
4840                         DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4841         }
4842
4843         if (!i915.enable_execlists) {
4844                 dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
4845                 dev_priv->gt.init_rings = i915_gem_init_rings;
4846                 dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
4847                 dev_priv->gt.stop_ring = intel_stop_ring_buffer;
4848         } else {
4849                 dev_priv->gt.do_execbuf = intel_execlists_submission;
4850                 dev_priv->gt.init_rings = intel_logical_rings_init;
4851                 dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
4852                 dev_priv->gt.stop_ring = intel_logical_ring_stop;
4853         }
4854
4855         ret = i915_gem_init_userptr(dev);
4856         if (ret) {
4857                 mutex_unlock(&dev->struct_mutex);
4858                 return ret;
4859         }
4860
4861         i915_gem_init_global_gtt(dev);
4862
4863         ret = i915_gem_context_init(dev);
4864         if (ret) {
4865                 mutex_unlock(&dev->struct_mutex);
4866                 return ret;
4867         }
4868
4869         ret = i915_gem_init_hw(dev);
4870         if (ret == -EIO) {
4871                 /* Allow ring initialisation to fail by marking the GPU as
4872                  * wedged. But we only want to do this where the GPU is angry;
4873                  * for all other failures, such as an allocation failure, bail.
4874                  */
4875                 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4876                 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4877                 ret = 0;
4878         }
4879         mutex_unlock(&dev->struct_mutex);
4880
4881         return ret;
4882 }
4883
4884 void
4885 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4886 {
4887         struct drm_i915_private *dev_priv = dev->dev_private;
4888         struct intel_engine_cs *ring;
4889         int i;
4890
4891         for_each_ring(ring, dev_priv, i)
4892                 dev_priv->gt.cleanup_ring(ring);
4893 }
4894
4895 static void
4896 init_ring_lists(struct intel_engine_cs *ring)
4897 {
4898         INIT_LIST_HEAD(&ring->active_list);
4899         INIT_LIST_HEAD(&ring->request_list);
4900 }
4901
4902 void i915_init_vm(struct drm_i915_private *dev_priv,
4903                   struct i915_address_space *vm)
4904 {
4905         if (!i915_is_ggtt(vm))
4906                 drm_mm_init(&vm->mm, vm->start, vm->total);
4907         vm->dev = dev_priv->dev;
4908         INIT_LIST_HEAD(&vm->active_list);
4909         INIT_LIST_HEAD(&vm->inactive_list);
4910         INIT_LIST_HEAD(&vm->global_link);
4911         list_add_tail(&vm->global_link, &dev_priv->vm_list);
4912 }
4913
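/*
 * i915_gem_load - set up the software state used by GEM
 *
 * Creates the GEM object slab cache, initializes the GGTT VM, the
 * object/fence/ring lists and the retire/idle workers, picks the number of
 * fence registers for the platform, detects bit-6 swizzling, and registers
 * the memory shrinker and OOM notifier.
 */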
4914 void
4915 i915_gem_load(struct drm_device *dev)
4916 {
4917         struct drm_i915_private *dev_priv = dev->dev_private;
4918         int i;
4919
4920         dev_priv->slab =
4921                 kmem_cache_create("i915_gem_object",
4922                                   sizeof(struct drm_i915_gem_object), 0,
4923                                   SLAB_HWCACHE_ALIGN,
4924                                   NULL);
4925
4926         INIT_LIST_HEAD(&dev_priv->vm_list);
4927         i915_init_vm(dev_priv, &dev_priv->gtt.base);
4928
4929         INIT_LIST_HEAD(&dev_priv->context_list);
4930         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4931         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4932         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4933         for (i = 0; i < I915_NUM_RINGS; i++)
4934                 init_ring_lists(&dev_priv->ring[i]);
4935         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4936                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4937         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4938                           i915_gem_retire_work_handler);
4939         INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4940                           i915_gem_idle_work_handler);
4941         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4942
4943         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4944         if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
4945                 I915_WRITE(MI_ARB_STATE,
4946                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4947         }
4948
4949         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4950
4951         /* Old X drivers will take 0-2 for front, back, depth buffers */
4952         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4953                 dev_priv->fence_reg_start = 3;
4954
4955         if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4956                 dev_priv->num_fence_regs = 32;
4957         else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4958                 dev_priv->num_fence_regs = 16;
4959         else
4960                 dev_priv->num_fence_regs = 8;
4961
4962         /* Initialize fence registers to zero */
4963         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4964         i915_gem_restore_fences(dev);
4965
4966         i915_gem_detect_bit_6_swizzle(dev);
4967         init_waitqueue_head(&dev_priv->pending_flip_queue);
4968
4969         dev_priv->mm.interruptible = true;
4970
4971         dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
4972         dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
4973         dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
4974         register_shrinker(&dev_priv->mm.shrinker);
4975
4976         dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
4977         register_oom_notifier(&dev_priv->mm.oom_notifier);
4978
4979         mutex_init(&dev_priv->fb_tracking.lock);
4980 }
4981
4982 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4983 {
4984         struct drm_i915_file_private *file_priv = file->driver_priv;
4985
4986         cancel_delayed_work_sync(&file_priv->mm.idle_work);
4987
4988         /* Clean up our request list when the client is going away, so that
4989          * later retire_requests won't dereference our soon-to-be-gone
4990          * file_priv.
4991          */
4992         spin_lock(&file_priv->mm.lock);
4993         while (!list_empty(&file_priv->mm.request_list)) {
4994                 struct drm_i915_gem_request *request;
4995
4996                 request = list_first_entry(&file_priv->mm.request_list,
4997                                            struct drm_i915_gem_request,
4998                                            client_list);
4999                 list_del(&request->client_list);
5000                 request->file_priv = NULL;
5001         }
5002         spin_unlock(&file_priv->mm.lock);
5003 }
5004
5005 static void
5006 i915_gem_file_idle_work_handler(struct work_struct *work)
5007 {
5008         struct drm_i915_file_private *file_priv =
5009                 container_of(work, typeof(*file_priv), mm.idle_work.work);
5010
5011         atomic_set(&file_priv->rps_wait_boost, false);
5012 }
5013
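/*
 * i915_gem_open - per-file GEM state setup
 *
 * Allocates the drm_i915_file_private, initializes its request list, lock
 * and RPS-boost idle worker, and opens a default context for the file; the
 * allocation is freed again if context setup fails.
 */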
5014 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
5015 {
5016         struct drm_i915_file_private *file_priv;
5017         int ret;
5018
5019         DRM_DEBUG_DRIVER("\n");
5020
5021         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5022         if (!file_priv)
5023                 return -ENOMEM;
5024
5025         file->driver_priv = file_priv;
5026         file_priv->dev_priv = dev->dev_private;
5027         file_priv->file = file;
5028
5029         spin_lock_init(&file_priv->mm.lock);
5030         INIT_LIST_HEAD(&file_priv->mm.request_list);
5031         INIT_DELAYED_WORK(&file_priv->mm.idle_work,
5032                           i915_gem_file_idle_work_handler);
5033
5034         ret = i915_gem_context_open(dev, file);
5035         if (ret)
5036                 kfree(file_priv);
5037
5038         return ret;
5039 }
5040
5041 /**
5042  * i915_gem_track_fb - update frontbuffer tracking
5043  * @old: current GEM buffer for the frontbuffer slots
5044  * @new: new GEM buffer for the frontbuffer slots
5045  * @frontbuffer_bits: bitmask of frontbuffer slots
5046  *
5047  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5048  * from @old and setting them in @new. Both @old and @new can be NULL.
5049  */
5050 void i915_gem_track_fb(struct drm_i915_gem_object *old,
5051                        struct drm_i915_gem_object *new,
5052                        unsigned frontbuffer_bits)
5053 {
5054         if (old) {
5055                 WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
5056                 WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
5057                 old->frontbuffer_bits &= ~frontbuffer_bits;
5058         }
5059
5060         if (new) {
5061                 WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
5062                 WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
5063                 new->frontbuffer_bits |= frontbuffer_bits;
5064         }
5065 }
5066
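/*
 * mutex_is_locked_by - best-effort check that @task currently owns @mutex
 *
 * Only reliable when the mutex owner is tracked (SMP or mutex debugging);
 * otherwise we conservatively report false.  Used by the shrinker to detect
 * that it was entered from a path that already holds struct_mutex.
 */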
5067 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
5068 {
5069         if (!mutex_is_locked(mutex))
5070                 return false;
5071
5072 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
5073         return mutex->owner == task;
5074 #else
5075         /* Since UP may be pre-empted, we cannot assume that we own the lock */
5076         return false;
5077 #endif
5078 }
5079
5080 static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
5081 {
5082         if (!mutex_trylock(&dev->struct_mutex)) {
5083                 if (!mutex_is_locked_by(&dev->struct_mutex, current))
5084                         return false;
5085
5086                 if (to_i915(dev)->mm.shrinker_no_lock_stealing)
5087                         return false;
5088
5089                 *unlock = false;
5090         } else
5091                 *unlock = true;
5092
5093         return true;
5094 }
5095
5096 static int num_vma_bound(struct drm_i915_gem_object *obj)
5097 {
5098         struct i915_vma *vma;
5099         int count = 0;
5100
5101         list_for_each_entry(vma, &obj->vma_list, vma_link)
5102                 if (drm_mm_node_allocated(&vma->node))
5103                         count++;
5104
5105         return count;
5106 }
5107
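/*
 * i915_gem_shrinker_count - report how many pages the shrinker could free
 *
 * Counts the pages of unbound objects whose pages are not pinned, plus
 * bound objects whose only page pins come from their VMA bindings, i.e.
 * objects that could be unbound and then have their pages dropped.
 */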
5108 static unsigned long
5109 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
5110 {
5111         struct drm_i915_private *dev_priv =
5112                 container_of(shrinker, struct drm_i915_private, mm.shrinker);
5113         struct drm_device *dev = dev_priv->dev;
5114         struct drm_i915_gem_object *obj;
5115         unsigned long count;
5116         bool unlock;
5117
5118         if (!i915_gem_shrinker_lock(dev, &unlock))
5119                 return 0;
5120
5121         count = 0;
5122         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
5123                 if (obj->pages_pin_count == 0)
5124                         count += obj->base.size >> PAGE_SHIFT;
5125
5126         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
5127                 if (!i915_gem_obj_is_pinned(obj) &&
5128                     obj->pages_pin_count == num_vma_bound(obj))
5129                         count += obj->base.size >> PAGE_SHIFT;
5130         }
5131
5132         if (unlock)
5133                 mutex_unlock(&dev->struct_mutex);
5134
5135         return count;
5136 }
5137
5138 /* All the new VM stuff: per-address-space (GGTT/PPGTT) object binding helpers */
5139 unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
5140                                   struct i915_address_space *vm)
5141 {
5142         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5143         struct i915_vma *vma;
5144
5145         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5146
5147         list_for_each_entry(vma, &o->vma_list, vma_link) {
5148                 if (vma->vm == vm)
5149                         return vma->node.start;
5150         }
5151
5152         WARN(1, "%s vma for this object not found.\n",
5153              i915_is_ggtt(vm) ? "global" : "ppgtt");
5154         return -1;
5155 }
5156
5157 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5158                         struct i915_address_space *vm)
5159 {
5160         struct i915_vma *vma;
5161
5162         list_for_each_entry(vma, &o->vma_list, vma_link)
5163                 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
5164                         return true;
5165
5166         return false;
5167 }
5168
5169 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5170 {
5171         struct i915_vma *vma;
5172
5173         list_for_each_entry(vma, &o->vma_list, vma_link)
5174                 if (drm_mm_node_allocated(&vma->node))
5175                         return true;
5176
5177         return false;
5178 }
5179
5180 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5181                                 struct i915_address_space *vm)
5182 {
5183         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5184         struct i915_vma *vma;
5185
5186         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5187
5188         BUG_ON(list_empty(&o->vma_list));
5189
5190         list_for_each_entry(vma, &o->vma_list, vma_link)
5191                 if (vma->vm == vm)
5192                         return vma->node.size;
5193
5194         return 0;
5195 }
5196
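/*
 * i915_gem_shrinker_scan - reclaim object pages on behalf of the VM
 *
 * First tries to free only purgeable objects, then falls back to shrinking
 * any bound or unbound object until sc->nr_to_scan pages have been freed.
 * Returns SHRINK_STOP when struct_mutex cannot be taken.
 */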
5197 static unsigned long
5198 i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
5199 {
5200         struct drm_i915_private *dev_priv =
5201                 container_of(shrinker, struct drm_i915_private, mm.shrinker);
5202         struct drm_device *dev = dev_priv->dev;
5203         unsigned long freed;
5204         bool unlock;
5205
5206         if (!i915_gem_shrinker_lock(dev, &unlock))
5207                 return SHRINK_STOP;
5208
5209         freed = i915_gem_shrink(dev_priv,
5210                                 sc->nr_to_scan,
5211                                 I915_SHRINK_BOUND |
5212                                 I915_SHRINK_UNBOUND |
5213                                 I915_SHRINK_PURGEABLE);
5214         if (freed < sc->nr_to_scan)
5215                 freed += i915_gem_shrink(dev_priv,
5216                                          sc->nr_to_scan - freed,
5217                                          I915_SHRINK_BOUND |
5218                                          I915_SHRINK_UNBOUND);
5219         if (unlock)
5220                 mutex_unlock(&dev->struct_mutex);
5221
5222         return freed;
5223 }
5224
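/*
 * i915_gem_shrinker_oom - last-ditch purge when the system hits OOM
 *
 * Tries (with a timeout) to take struct_mutex, shrinks every object it can,
 * and then reports how much memory was freed and how much remains pinned or
 * otherwise unreclaimable, so the OOM killer's log has something useful.
 */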
5225 static int
5226 i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
5227 {
5228         struct drm_i915_private *dev_priv =
5229                 container_of(nb, struct drm_i915_private, mm.oom_notifier);
5230         struct drm_device *dev = dev_priv->dev;
5231         struct drm_i915_gem_object *obj;
5232         unsigned long timeout = msecs_to_jiffies(5000) + 1;
5233         unsigned long pinned, bound, unbound, freed_pages;
5234         bool was_interruptible;
5235         bool unlock;
5236
5237         while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
5238                 schedule_timeout_killable(1);
5239                 if (fatal_signal_pending(current))
5240                         return NOTIFY_DONE;
5241         }
5242         if (timeout == 0) {
5243                 pr_err("Unable to purge GPU memory due to lock contention.\n");
5244                 return NOTIFY_DONE;
5245         }
5246
5247         was_interruptible = dev_priv->mm.interruptible;
5248         dev_priv->mm.interruptible = false;
5249
5250         freed_pages = i915_gem_shrink_all(dev_priv);
5251
5252         dev_priv->mm.interruptible = was_interruptible;
5253
5254         /* Because we may be allocating inside our own driver, we cannot
5255          * assert that there are no objects with pinned pages that are not
5256          * being pointed to by hardware.
5257          */
5258         unbound = bound = pinned = 0;
5259         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
5260                 if (!obj->base.filp) /* not backed by a freeable object */
5261                         continue;
5262
5263                 if (obj->pages_pin_count)
5264                         pinned += obj->base.size;
5265                 else
5266                         unbound += obj->base.size;
5267         }
5268         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
5269                 if (!obj->base.filp)
5270                         continue;
5271
5272                 if (obj->pages_pin_count)
5273                         pinned += obj->base.size;
5274                 else
5275                         bound += obj->base.size;
5276         }
5277
5278         if (unlock)
5279                 mutex_unlock(&dev->struct_mutex);
5280
5281         if (freed_pages || unbound || bound)
5282                 pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
5283                         freed_pages << PAGE_SHIFT, pinned);
5284         if (unbound || bound)
5285                 pr_err("%lu and %lu bytes still available in the "
5286                        "bound and unbound GPU page lists.\n",
5287                        bound, unbound);
5288
5289         *(unsigned long *)ptr += freed_pages;
5290         return NOTIFY_DONE;
5291 }
5292
5293 struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5294 {
5295         struct i915_vma *vma;
5296
5297         vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
5298         if (vma->vm != i915_obj_to_ggtt(obj))
5299                 return NULL;
5300
5301         return vma;
5302 }