]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/gpu/drm/i915/i915_gem.c
drm/i915: Update i915_gem_get_ggtt_size/_alignment to use drm_i915_private
[karo-tx-linux.git] / drivers / gpu / drm / i915 / i915_gem.c
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_gem_dmabuf.h"
33 #include "i915_vgpu.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36 #include "intel_mocs.h"
37 #include <linux/reservation.h>
38 #include <linux/shmem_fs.h>
39 #include <linux/slab.h>
40 #include <linux/swap.h>
41 #include <linux/pci.h>
42 #include <linux/dma-buf.h>
43
44 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
45 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
46
47 static bool cpu_cache_is_coherent(struct drm_device *dev,
48                                   enum i915_cache_level level)
49 {
50         return HAS_LLC(dev) || level != I915_CACHE_NONE;
51 }
52
53 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
54 {
55         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
56                 return false;
57
58         if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
59                 return true;
60
61         return obj->pin_display;
62 }
63
64 static int
65 insert_mappable_node(struct drm_i915_private *i915,
66                      struct drm_mm_node *node, u32 size)
67 {
68         memset(node, 0, sizeof(*node));
69         return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
70                                                    size, 0, 0, 0,
71                                                    i915->ggtt.mappable_end,
72                                                    DRM_MM_SEARCH_DEFAULT,
73                                                    DRM_MM_CREATE_DEFAULT);
74 }
75
76 static void
77 remove_mappable_node(struct drm_mm_node *node)
78 {
79         drm_mm_remove_node(node);
80 }
81
82 /* some bookkeeping */
83 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
84                                   size_t size)
85 {
86         spin_lock(&dev_priv->mm.object_stat_lock);
87         dev_priv->mm.object_count++;
88         dev_priv->mm.object_memory += size;
89         spin_unlock(&dev_priv->mm.object_stat_lock);
90 }
91
92 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
93                                      size_t size)
94 {
95         spin_lock(&dev_priv->mm.object_stat_lock);
96         dev_priv->mm.object_count--;
97         dev_priv->mm.object_memory -= size;
98         spin_unlock(&dev_priv->mm.object_stat_lock);
99 }
100
101 static int
102 i915_gem_wait_for_error(struct i915_gpu_error *error)
103 {
104         int ret;
105
106         if (!i915_reset_in_progress(error))
107                 return 0;
108
109         /*
110          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
111          * userspace. If it takes that long something really bad is going on and
112          * we should simply try to bail out and fail as gracefully as possible.
113          */
114         ret = wait_event_interruptible_timeout(error->reset_queue,
115                                                !i915_reset_in_progress(error),
116                                                10*HZ);
117         if (ret == 0) {
118                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
119                 return -EIO;
120         } else if (ret < 0) {
121                 return ret;
122         } else {
123                 return 0;
124         }
125 }
126
127 int i915_mutex_lock_interruptible(struct drm_device *dev)
128 {
129         struct drm_i915_private *dev_priv = to_i915(dev);
130         int ret;
131
132         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
133         if (ret)
134                 return ret;
135
136         ret = mutex_lock_interruptible(&dev->struct_mutex);
137         if (ret)
138                 return ret;
139
140         return 0;
141 }
142
143 int
144 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
145                             struct drm_file *file)
146 {
147         struct drm_i915_private *dev_priv = to_i915(dev);
148         struct i915_ggtt *ggtt = &dev_priv->ggtt;
149         struct drm_i915_gem_get_aperture *args = data;
150         struct i915_vma *vma;
151         size_t pinned;
152
153         pinned = 0;
154         mutex_lock(&dev->struct_mutex);
155         list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
156                 if (vma->pin_count)
157                         pinned += vma->node.size;
158         list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
159                 if (vma->pin_count)
160                         pinned += vma->node.size;
161         mutex_unlock(&dev->struct_mutex);
162
163         args->aper_size = ggtt->base.total;
164         args->aper_available_size = args->aper_size - pinned;
165
166         return 0;
167 }
168
169 static int
170 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
171 {
172         struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
173         char *vaddr = obj->phys_handle->vaddr;
174         struct sg_table *st;
175         struct scatterlist *sg;
176         int i;
177
178         if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
179                 return -EINVAL;
180
181         for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
182                 struct page *page;
183                 char *src;
184
185                 page = shmem_read_mapping_page(mapping, i);
186                 if (IS_ERR(page))
187                         return PTR_ERR(page);
188
189                 src = kmap_atomic(page);
190                 memcpy(vaddr, src, PAGE_SIZE);
191                 drm_clflush_virt_range(vaddr, PAGE_SIZE);
192                 kunmap_atomic(src);
193
194                 put_page(page);
195                 vaddr += PAGE_SIZE;
196         }
197
198         i915_gem_chipset_flush(to_i915(obj->base.dev));
199
200         st = kmalloc(sizeof(*st), GFP_KERNEL);
201         if (st == NULL)
202                 return -ENOMEM;
203
204         if (sg_alloc_table(st, 1, GFP_KERNEL)) {
205                 kfree(st);
206                 return -ENOMEM;
207         }
208
209         sg = st->sgl;
210         sg->offset = 0;
211         sg->length = obj->base.size;
212
213         sg_dma_address(sg) = obj->phys_handle->busaddr;
214         sg_dma_len(sg) = obj->base.size;
215
216         obj->pages = st;
217         return 0;
218 }
219
220 static void
221 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
222 {
223         int ret;
224
225         BUG_ON(obj->madv == __I915_MADV_PURGED);
226
227         ret = i915_gem_object_set_to_cpu_domain(obj, true);
228         if (WARN_ON(ret)) {
229                 /* In the event of a disaster, abandon all caches and
230                  * hope for the best.
231                  */
232                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
233         }
234
235         if (obj->madv == I915_MADV_DONTNEED)
236                 obj->dirty = 0;
237
238         if (obj->dirty) {
239                 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
240                 char *vaddr = obj->phys_handle->vaddr;
241                 int i;
242
243                 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
244                         struct page *page;
245                         char *dst;
246
247                         page = shmem_read_mapping_page(mapping, i);
248                         if (IS_ERR(page))
249                                 continue;
250
251                         dst = kmap_atomic(page);
252                         drm_clflush_virt_range(vaddr, PAGE_SIZE);
253                         memcpy(dst, vaddr, PAGE_SIZE);
254                         kunmap_atomic(dst);
255
256                         set_page_dirty(page);
257                         if (obj->madv == I915_MADV_WILLNEED)
258                                 mark_page_accessed(page);
259                         put_page(page);
260                         vaddr += PAGE_SIZE;
261                 }
262                 obj->dirty = 0;
263         }
264
265         sg_free_table(obj->pages);
266         kfree(obj->pages);
267 }
268
269 static void
270 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
271 {
272         drm_pci_free(obj->base.dev, obj->phys_handle);
273 }
274
275 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
276         .get_pages = i915_gem_object_get_pages_phys,
277         .put_pages = i915_gem_object_put_pages_phys,
278         .release = i915_gem_object_release_phys,
279 };
280
281 int
282 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
283 {
284         struct i915_vma *vma;
285         LIST_HEAD(still_in_list);
286         int ret;
287
288         /* The vma will only be freed if it is marked as closed, and if we wait
289          * upon rendering to the vma, we may unbind anything in the list.
290          */
291         while ((vma = list_first_entry_or_null(&obj->vma_list,
292                                                struct i915_vma,
293                                                obj_link))) {
294                 list_move_tail(&vma->obj_link, &still_in_list);
295                 ret = i915_vma_unbind(vma);
296                 if (ret)
297                         break;
298         }
299         list_splice(&still_in_list, &obj->vma_list);
300
301         return ret;
302 }
303
304 int
305 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
306                             int align)
307 {
308         drm_dma_handle_t *phys;
309         int ret;
310
311         if (obj->phys_handle) {
312                 if ((unsigned long)obj->phys_handle->vaddr & (align -1))
313                         return -EBUSY;
314
315                 return 0;
316         }
317
318         if (obj->madv != I915_MADV_WILLNEED)
319                 return -EFAULT;
320
321         if (obj->base.filp == NULL)
322                 return -EINVAL;
323
324         ret = i915_gem_object_unbind(obj);
325         if (ret)
326                 return ret;
327
328         ret = i915_gem_object_put_pages(obj);
329         if (ret)
330                 return ret;
331
332         /* create a new object */
333         phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
334         if (!phys)
335                 return -ENOMEM;
336
337         obj->phys_handle = phys;
338         obj->ops = &i915_gem_phys_ops;
339
340         return i915_gem_object_get_pages(obj);
341 }
342
343 static int
344 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
345                      struct drm_i915_gem_pwrite *args,
346                      struct drm_file *file_priv)
347 {
348         struct drm_device *dev = obj->base.dev;
349         void *vaddr = obj->phys_handle->vaddr + args->offset;
350         char __user *user_data = u64_to_user_ptr(args->data_ptr);
351         int ret = 0;
352
353         /* We manually control the domain here and pretend that it
354          * remains coherent i.e. in the GTT domain, like shmem_pwrite.
355          */
356         ret = i915_gem_object_wait_rendering(obj, false);
357         if (ret)
358                 return ret;
359
360         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
361         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
362                 unsigned long unwritten;
363
364                 /* The physical object once assigned is fixed for the lifetime
365                  * of the obj, so we can safely drop the lock and continue
366                  * to access vaddr.
367                  */
368                 mutex_unlock(&dev->struct_mutex);
369                 unwritten = copy_from_user(vaddr, user_data, args->size);
370                 mutex_lock(&dev->struct_mutex);
371                 if (unwritten) {
372                         ret = -EFAULT;
373                         goto out;
374                 }
375         }
376
377         drm_clflush_virt_range(vaddr, args->size);
378         i915_gem_chipset_flush(to_i915(dev));
379
380 out:
381         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
382         return ret;
383 }
384
385 void *i915_gem_object_alloc(struct drm_device *dev)
386 {
387         struct drm_i915_private *dev_priv = to_i915(dev);
388         return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
389 }
390
391 void i915_gem_object_free(struct drm_i915_gem_object *obj)
392 {
393         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
394         kmem_cache_free(dev_priv->objects, obj);
395 }
396
397 static int
398 i915_gem_create(struct drm_file *file,
399                 struct drm_device *dev,
400                 uint64_t size,
401                 uint32_t *handle_p)
402 {
403         struct drm_i915_gem_object *obj;
404         int ret;
405         u32 handle;
406
407         size = roundup(size, PAGE_SIZE);
408         if (size == 0)
409                 return -EINVAL;
410
411         /* Allocate the new object */
412         obj = i915_gem_object_create(dev, size);
413         if (IS_ERR(obj))
414                 return PTR_ERR(obj);
415
416         ret = drm_gem_handle_create(file, &obj->base, &handle);
417         /* drop reference from allocate - handle holds it now */
418         i915_gem_object_put_unlocked(obj);
419         if (ret)
420                 return ret;
421
422         *handle_p = handle;
423         return 0;
424 }
425
426 int
427 i915_gem_dumb_create(struct drm_file *file,
428                      struct drm_device *dev,
429                      struct drm_mode_create_dumb *args)
430 {
431         /* have to work out size/pitch and return them */
432         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
433         args->size = args->pitch * args->height;
434         return i915_gem_create(file, dev,
435                                args->size, &args->handle);
436 }
437
438 /**
439  * Creates a new mm object and returns a handle to it.
440  * @dev: drm device pointer
441  * @data: ioctl data blob
442  * @file: drm file pointer
443  */
444 int
445 i915_gem_create_ioctl(struct drm_device *dev, void *data,
446                       struct drm_file *file)
447 {
448         struct drm_i915_gem_create *args = data;
449
450         return i915_gem_create(file, dev,
451                                args->size, &args->handle);
452 }
453
454 static inline int
455 __copy_to_user_swizzled(char __user *cpu_vaddr,
456                         const char *gpu_vaddr, int gpu_offset,
457                         int length)
458 {
459         int ret, cpu_offset = 0;
460
461         while (length > 0) {
462                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
463                 int this_length = min(cacheline_end - gpu_offset, length);
464                 int swizzled_gpu_offset = gpu_offset ^ 64;
465
466                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
467                                      gpu_vaddr + swizzled_gpu_offset,
468                                      this_length);
469                 if (ret)
470                         return ret + length;
471
472                 cpu_offset += this_length;
473                 gpu_offset += this_length;
474                 length -= this_length;
475         }
476
477         return 0;
478 }
479
480 static inline int
481 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
482                           const char __user *cpu_vaddr,
483                           int length)
484 {
485         int ret, cpu_offset = 0;
486
487         while (length > 0) {
488                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
489                 int this_length = min(cacheline_end - gpu_offset, length);
490                 int swizzled_gpu_offset = gpu_offset ^ 64;
491
492                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
493                                        cpu_vaddr + cpu_offset,
494                                        this_length);
495                 if (ret)
496                         return ret + length;
497
498                 cpu_offset += this_length;
499                 gpu_offset += this_length;
500                 length -= this_length;
501         }
502
503         return 0;
504 }
505
506 /*
507  * Pins the specified object's pages and synchronizes the object with
508  * GPU accesses. Sets needs_clflush to non-zero if the caller should
509  * flush the object from the CPU cache.
510  */
511 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
512                                     int *needs_clflush)
513 {
514         int ret;
515
516         *needs_clflush = 0;
517
518         if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
519                 return -EINVAL;
520
521         ret = i915_gem_object_wait_rendering(obj, true);
522         if (ret)
523                 return ret;
524
525         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
526                 /* If we're not in the cpu read domain, set ourself into the gtt
527                  * read domain and manually flush cachelines (if required). This
528                  * optimizes for the case when the gpu will dirty the data
529                  * anyway again before the next pread happens. */
530                 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
531                                                         obj->cache_level);
532         }
533
534         ret = i915_gem_object_get_pages(obj);
535         if (ret)
536                 return ret;
537
538         i915_gem_object_pin_pages(obj);
539
540         return ret;
541 }
542
543 /* Per-page copy function for the shmem pread fastpath.
544  * Flushes invalid cachelines before reading the target if
545  * needs_clflush is set. */
546 static int
547 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
548                  char __user *user_data,
549                  bool page_do_bit17_swizzling, bool needs_clflush)
550 {
551         char *vaddr;
552         int ret;
553
554         if (unlikely(page_do_bit17_swizzling))
555                 return -EINVAL;
556
557         vaddr = kmap_atomic(page);
558         if (needs_clflush)
559                 drm_clflush_virt_range(vaddr + shmem_page_offset,
560                                        page_length);
561         ret = __copy_to_user_inatomic(user_data,
562                                       vaddr + shmem_page_offset,
563                                       page_length);
564         kunmap_atomic(vaddr);
565
566         return ret ? -EFAULT : 0;
567 }
568
569 static void
570 shmem_clflush_swizzled_range(char *addr, unsigned long length,
571                              bool swizzled)
572 {
573         if (unlikely(swizzled)) {
574                 unsigned long start = (unsigned long) addr;
575                 unsigned long end = (unsigned long) addr + length;
576
577                 /* For swizzling simply ensure that we always flush both
578                  * channels. Lame, but simple and it works. Swizzled
579                  * pwrite/pread is far from a hotpath - current userspace
580                  * doesn't use it at all. */
581                 start = round_down(start, 128);
582                 end = round_up(end, 128);
583
584                 drm_clflush_virt_range((void *)start, end - start);
585         } else {
586                 drm_clflush_virt_range(addr, length);
587         }
588
589 }
590
591 /* Only difference to the fast-path function is that this can handle bit17
592  * and uses non-atomic copy and kmap functions. */
593 static int
594 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
595                  char __user *user_data,
596                  bool page_do_bit17_swizzling, bool needs_clflush)
597 {
598         char *vaddr;
599         int ret;
600
601         vaddr = kmap(page);
602         if (needs_clflush)
603                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
604                                              page_length,
605                                              page_do_bit17_swizzling);
606
607         if (page_do_bit17_swizzling)
608                 ret = __copy_to_user_swizzled(user_data,
609                                               vaddr, shmem_page_offset,
610                                               page_length);
611         else
612                 ret = __copy_to_user(user_data,
613                                      vaddr + shmem_page_offset,
614                                      page_length);
615         kunmap(page);
616
617         return ret ? - EFAULT : 0;
618 }
619
620 static inline unsigned long
621 slow_user_access(struct io_mapping *mapping,
622                  uint64_t page_base, int page_offset,
623                  char __user *user_data,
624                  unsigned long length, bool pwrite)
625 {
626         void __iomem *ioaddr;
627         void *vaddr;
628         uint64_t unwritten;
629
630         ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
631         /* We can use the cpu mem copy function because this is X86. */
632         vaddr = (void __force *)ioaddr + page_offset;
633         if (pwrite)
634                 unwritten = __copy_from_user(vaddr, user_data, length);
635         else
636                 unwritten = __copy_to_user(user_data, vaddr, length);
637
638         io_mapping_unmap(ioaddr);
639         return unwritten;
640 }
641
642 static int
643 i915_gem_gtt_pread(struct drm_device *dev,
644                    struct drm_i915_gem_object *obj, uint64_t size,
645                    uint64_t data_offset, uint64_t data_ptr)
646 {
647         struct drm_i915_private *dev_priv = to_i915(dev);
648         struct i915_ggtt *ggtt = &dev_priv->ggtt;
649         struct drm_mm_node node;
650         char __user *user_data;
651         uint64_t remain;
652         uint64_t offset;
653         int ret;
654
655         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
656         if (ret) {
657                 ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
658                 if (ret)
659                         goto out;
660
661                 ret = i915_gem_object_get_pages(obj);
662                 if (ret) {
663                         remove_mappable_node(&node);
664                         goto out;
665                 }
666
667                 i915_gem_object_pin_pages(obj);
668         } else {
669                 node.start = i915_gem_obj_ggtt_offset(obj);
670                 node.allocated = false;
671                 ret = i915_gem_object_put_fence(obj);
672                 if (ret)
673                         goto out_unpin;
674         }
675
676         ret = i915_gem_object_set_to_gtt_domain(obj, false);
677         if (ret)
678                 goto out_unpin;
679
680         user_data = u64_to_user_ptr(data_ptr);
681         remain = size;
682         offset = data_offset;
683
684         mutex_unlock(&dev->struct_mutex);
685         if (likely(!i915.prefault_disable)) {
686                 ret = fault_in_multipages_writeable(user_data, remain);
687                 if (ret) {
688                         mutex_lock(&dev->struct_mutex);
689                         goto out_unpin;
690                 }
691         }
692
693         while (remain > 0) {
694                 /* Operation in this page
695                  *
696                  * page_base = page offset within aperture
697                  * page_offset = offset within page
698                  * page_length = bytes to copy for this page
699                  */
700                 u32 page_base = node.start;
701                 unsigned page_offset = offset_in_page(offset);
702                 unsigned page_length = PAGE_SIZE - page_offset;
703                 page_length = remain < page_length ? remain : page_length;
704                 if (node.allocated) {
705                         wmb();
706                         ggtt->base.insert_page(&ggtt->base,
707                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
708                                                node.start,
709                                                I915_CACHE_NONE, 0);
710                         wmb();
711                 } else {
712                         page_base += offset & PAGE_MASK;
713                 }
714                 /* This is a slow read/write as it tries to read from
715                  * and write to user memory which may result into page
716                  * faults, and so we cannot perform this under struct_mutex.
717                  */
718                 if (slow_user_access(ggtt->mappable, page_base,
719                                      page_offset, user_data,
720                                      page_length, false)) {
721                         ret = -EFAULT;
722                         break;
723                 }
724
725                 remain -= page_length;
726                 user_data += page_length;
727                 offset += page_length;
728         }
729
730         mutex_lock(&dev->struct_mutex);
731         if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
732                 /* The user has modified the object whilst we tried
733                  * reading from it, and we now have no idea what domain
734                  * the pages should be in. As we have just been touching
735                  * them directly, flush everything back to the GTT
736                  * domain.
737                  */
738                 ret = i915_gem_object_set_to_gtt_domain(obj, false);
739         }
740
741 out_unpin:
742         if (node.allocated) {
743                 wmb();
744                 ggtt->base.clear_range(&ggtt->base,
745                                        node.start, node.size,
746                                        true);
747                 i915_gem_object_unpin_pages(obj);
748                 remove_mappable_node(&node);
749         } else {
750                 i915_gem_object_ggtt_unpin(obj);
751         }
752 out:
753         return ret;
754 }
755
756 static int
757 i915_gem_shmem_pread(struct drm_device *dev,
758                      struct drm_i915_gem_object *obj,
759                      struct drm_i915_gem_pread *args,
760                      struct drm_file *file)
761 {
762         char __user *user_data;
763         ssize_t remain;
764         loff_t offset;
765         int shmem_page_offset, page_length, ret = 0;
766         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
767         int prefaulted = 0;
768         int needs_clflush = 0;
769         struct sg_page_iter sg_iter;
770
771         if (!i915_gem_object_has_struct_page(obj))
772                 return -ENODEV;
773
774         user_data = u64_to_user_ptr(args->data_ptr);
775         remain = args->size;
776
777         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
778
779         ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
780         if (ret)
781                 return ret;
782
783         offset = args->offset;
784
785         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
786                          offset >> PAGE_SHIFT) {
787                 struct page *page = sg_page_iter_page(&sg_iter);
788
789                 if (remain <= 0)
790                         break;
791
792                 /* Operation in this page
793                  *
794                  * shmem_page_offset = offset within page in shmem file
795                  * page_length = bytes to copy for this page
796                  */
797                 shmem_page_offset = offset_in_page(offset);
798                 page_length = remain;
799                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
800                         page_length = PAGE_SIZE - shmem_page_offset;
801
802                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
803                         (page_to_phys(page) & (1 << 17)) != 0;
804
805                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
806                                        user_data, page_do_bit17_swizzling,
807                                        needs_clflush);
808                 if (ret == 0)
809                         goto next_page;
810
811                 mutex_unlock(&dev->struct_mutex);
812
813                 if (likely(!i915.prefault_disable) && !prefaulted) {
814                         ret = fault_in_multipages_writeable(user_data, remain);
815                         /* Userspace is tricking us, but we've already clobbered
816                          * its pages with the prefault and promised to write the
817                          * data up to the first fault. Hence ignore any errors
818                          * and just continue. */
819                         (void)ret;
820                         prefaulted = 1;
821                 }
822
823                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
824                                        user_data, page_do_bit17_swizzling,
825                                        needs_clflush);
826
827                 mutex_lock(&dev->struct_mutex);
828
829                 if (ret)
830                         goto out;
831
832 next_page:
833                 remain -= page_length;
834                 user_data += page_length;
835                 offset += page_length;
836         }
837
838 out:
839         i915_gem_object_unpin_pages(obj);
840
841         return ret;
842 }
843
844 /**
845  * Reads data from the object referenced by handle.
846  * @dev: drm device pointer
847  * @data: ioctl data blob
848  * @file: drm file pointer
849  *
850  * On error, the contents of *data are undefined.
851  */
852 int
853 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
854                      struct drm_file *file)
855 {
856         struct drm_i915_gem_pread *args = data;
857         struct drm_i915_gem_object *obj;
858         int ret = 0;
859
860         if (args->size == 0)
861                 return 0;
862
863         if (!access_ok(VERIFY_WRITE,
864                        u64_to_user_ptr(args->data_ptr),
865                        args->size))
866                 return -EFAULT;
867
868         ret = i915_mutex_lock_interruptible(dev);
869         if (ret)
870                 return ret;
871
872         obj = i915_gem_object_lookup(file, args->handle);
873         if (!obj) {
874                 ret = -ENOENT;
875                 goto unlock;
876         }
877
878         /* Bounds check source.  */
879         if (args->offset > obj->base.size ||
880             args->size > obj->base.size - args->offset) {
881                 ret = -EINVAL;
882                 goto out;
883         }
884
885         trace_i915_gem_object_pread(obj, args->offset, args->size);
886
887         ret = i915_gem_shmem_pread(dev, obj, args, file);
888
889         /* pread for non shmem backed objects */
890         if (ret == -EFAULT || ret == -ENODEV) {
891                 intel_runtime_pm_get(to_i915(dev));
892                 ret = i915_gem_gtt_pread(dev, obj, args->size,
893                                         args->offset, args->data_ptr);
894                 intel_runtime_pm_put(to_i915(dev));
895         }
896
897 out:
898         i915_gem_object_put(obj);
899 unlock:
900         mutex_unlock(&dev->struct_mutex);
901         return ret;
902 }
903
904 /* This is the fast write path which cannot handle
905  * page faults in the source data
906  */
907
908 static inline int
909 fast_user_write(struct io_mapping *mapping,
910                 loff_t page_base, int page_offset,
911                 char __user *user_data,
912                 int length)
913 {
914         void __iomem *vaddr_atomic;
915         void *vaddr;
916         unsigned long unwritten;
917
918         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
919         /* We can use the cpu mem copy function because this is X86. */
920         vaddr = (void __force*)vaddr_atomic + page_offset;
921         unwritten = __copy_from_user_inatomic_nocache(vaddr,
922                                                       user_data, length);
923         io_mapping_unmap_atomic(vaddr_atomic);
924         return unwritten;
925 }
926
927 /**
928  * This is the fast pwrite path, where we copy the data directly from the
929  * user into the GTT, uncached.
930  * @i915: i915 device private data
931  * @obj: i915 gem object
932  * @args: pwrite arguments structure
933  * @file: drm file pointer
934  */
935 static int
936 i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
937                          struct drm_i915_gem_object *obj,
938                          struct drm_i915_gem_pwrite *args,
939                          struct drm_file *file)
940 {
941         struct i915_ggtt *ggtt = &i915->ggtt;
942         struct drm_device *dev = obj->base.dev;
943         struct drm_mm_node node;
944         uint64_t remain, offset;
945         char __user *user_data;
946         int ret;
947         bool hit_slow_path = false;
948
949         if (obj->tiling_mode != I915_TILING_NONE)
950                 return -EFAULT;
951
952         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
953         if (ret) {
954                 ret = insert_mappable_node(i915, &node, PAGE_SIZE);
955                 if (ret)
956                         goto out;
957
958                 ret = i915_gem_object_get_pages(obj);
959                 if (ret) {
960                         remove_mappable_node(&node);
961                         goto out;
962                 }
963
964                 i915_gem_object_pin_pages(obj);
965         } else {
966                 node.start = i915_gem_obj_ggtt_offset(obj);
967                 node.allocated = false;
968                 ret = i915_gem_object_put_fence(obj);
969                 if (ret)
970                         goto out_unpin;
971         }
972
973         ret = i915_gem_object_set_to_gtt_domain(obj, true);
974         if (ret)
975                 goto out_unpin;
976
977         intel_fb_obj_invalidate(obj, ORIGIN_GTT);
978         obj->dirty = true;
979
980         user_data = u64_to_user_ptr(args->data_ptr);
981         offset = args->offset;
982         remain = args->size;
983         while (remain) {
984                 /* Operation in this page
985                  *
986                  * page_base = page offset within aperture
987                  * page_offset = offset within page
988                  * page_length = bytes to copy for this page
989                  */
990                 u32 page_base = node.start;
991                 unsigned page_offset = offset_in_page(offset);
992                 unsigned page_length = PAGE_SIZE - page_offset;
993                 page_length = remain < page_length ? remain : page_length;
994                 if (node.allocated) {
995                         wmb(); /* flush the write before we modify the GGTT */
996                         ggtt->base.insert_page(&ggtt->base,
997                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
998                                                node.start, I915_CACHE_NONE, 0);
999                         wmb(); /* flush modifications to the GGTT (insert_page) */
1000                 } else {
1001                         page_base += offset & PAGE_MASK;
1002                 }
1003                 /* If we get a fault while copying data, then (presumably) our
1004                  * source page isn't available.  Return the error and we'll
1005                  * retry in the slow path.
1006                  * If the object is non-shmem backed, we retry again with the
1007                  * path that handles page fault.
1008                  */
1009                 if (fast_user_write(ggtt->mappable, page_base,
1010                                     page_offset, user_data, page_length)) {
1011                         hit_slow_path = true;
1012                         mutex_unlock(&dev->struct_mutex);
1013                         if (slow_user_access(ggtt->mappable,
1014                                              page_base,
1015                                              page_offset, user_data,
1016                                              page_length, true)) {
1017                                 ret = -EFAULT;
1018                                 mutex_lock(&dev->struct_mutex);
1019                                 goto out_flush;
1020                         }
1021
1022                         mutex_lock(&dev->struct_mutex);
1023                 }
1024
1025                 remain -= page_length;
1026                 user_data += page_length;
1027                 offset += page_length;
1028         }
1029
1030 out_flush:
1031         if (hit_slow_path) {
1032                 if (ret == 0 &&
1033                     (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
1034                         /* The user has modified the object whilst we tried
1035                          * reading from it, and we now have no idea what domain
1036                          * the pages should be in. As we have just been touching
1037                          * them directly, flush everything back to the GTT
1038                          * domain.
1039                          */
1040                         ret = i915_gem_object_set_to_gtt_domain(obj, false);
1041                 }
1042         }
1043
1044         intel_fb_obj_flush(obj, false, ORIGIN_GTT);
1045 out_unpin:
1046         if (node.allocated) {
1047                 wmb();
1048                 ggtt->base.clear_range(&ggtt->base,
1049                                        node.start, node.size,
1050                                        true);
1051                 i915_gem_object_unpin_pages(obj);
1052                 remove_mappable_node(&node);
1053         } else {
1054                 i915_gem_object_ggtt_unpin(obj);
1055         }
1056 out:
1057         return ret;
1058 }
1059
1060 /* Per-page copy function for the shmem pwrite fastpath.
1061  * Flushes invalid cachelines before writing to the target if
1062  * needs_clflush_before is set and flushes out any written cachelines after
1063  * writing if needs_clflush is set. */
1064 static int
1065 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
1066                   char __user *user_data,
1067                   bool page_do_bit17_swizzling,
1068                   bool needs_clflush_before,
1069                   bool needs_clflush_after)
1070 {
1071         char *vaddr;
1072         int ret;
1073
1074         if (unlikely(page_do_bit17_swizzling))
1075                 return -EINVAL;
1076
1077         vaddr = kmap_atomic(page);
1078         if (needs_clflush_before)
1079                 drm_clflush_virt_range(vaddr + shmem_page_offset,
1080                                        page_length);
1081         ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
1082                                         user_data, page_length);
1083         if (needs_clflush_after)
1084                 drm_clflush_virt_range(vaddr + shmem_page_offset,
1085                                        page_length);
1086         kunmap_atomic(vaddr);
1087
1088         return ret ? -EFAULT : 0;
1089 }
1090
1091 /* Only difference to the fast-path function is that this can handle bit17
1092  * and uses non-atomic copy and kmap functions. */
1093 static int
1094 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
1095                   char __user *user_data,
1096                   bool page_do_bit17_swizzling,
1097                   bool needs_clflush_before,
1098                   bool needs_clflush_after)
1099 {
1100         char *vaddr;
1101         int ret;
1102
1103         vaddr = kmap(page);
1104         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1105                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
1106                                              page_length,
1107                                              page_do_bit17_swizzling);
1108         if (page_do_bit17_swizzling)
1109                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
1110                                                 user_data,
1111                                                 page_length);
1112         else
1113                 ret = __copy_from_user(vaddr + shmem_page_offset,
1114                                        user_data,
1115                                        page_length);
1116         if (needs_clflush_after)
1117                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
1118                                              page_length,
1119                                              page_do_bit17_swizzling);
1120         kunmap(page);
1121
1122         return ret ? -EFAULT : 0;
1123 }
1124
1125 static int
1126 i915_gem_shmem_pwrite(struct drm_device *dev,
1127                       struct drm_i915_gem_object *obj,
1128                       struct drm_i915_gem_pwrite *args,
1129                       struct drm_file *file)
1130 {
1131         ssize_t remain;
1132         loff_t offset;
1133         char __user *user_data;
1134         int shmem_page_offset, page_length, ret = 0;
1135         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
1136         int hit_slowpath = 0;
1137         int needs_clflush_after = 0;
1138         int needs_clflush_before = 0;
1139         struct sg_page_iter sg_iter;
1140
1141         user_data = u64_to_user_ptr(args->data_ptr);
1142         remain = args->size;
1143
1144         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
1145
1146         ret = i915_gem_object_wait_rendering(obj, false);
1147         if (ret)
1148                 return ret;
1149
1150         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1151                 /* If we're not in the cpu write domain, set ourself into the gtt
1152                  * write domain and manually flush cachelines (if required). This
1153                  * optimizes for the case when the gpu will use the data
1154                  * right away and we therefore have to clflush anyway. */
1155                 needs_clflush_after = cpu_write_needs_clflush(obj);
1156         }
1157         /* Same trick applies to invalidate partially written cachelines read
1158          * before writing. */
1159         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
1160                 needs_clflush_before =
1161                         !cpu_cache_is_coherent(dev, obj->cache_level);
1162
1163         ret = i915_gem_object_get_pages(obj);
1164         if (ret)
1165                 return ret;
1166
1167         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1168
1169         i915_gem_object_pin_pages(obj);
1170
1171         offset = args->offset;
1172         obj->dirty = 1;
1173
1174         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
1175                          offset >> PAGE_SHIFT) {
1176                 struct page *page = sg_page_iter_page(&sg_iter);
1177                 int partial_cacheline_write;
1178
1179                 if (remain <= 0)
1180                         break;
1181
1182                 /* Operation in this page
1183                  *
1184                  * shmem_page_offset = offset within page in shmem file
1185                  * page_length = bytes to copy for this page
1186                  */
1187                 shmem_page_offset = offset_in_page(offset);
1188
1189                 page_length = remain;
1190                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
1191                         page_length = PAGE_SIZE - shmem_page_offset;
1192
1193                 /* If we don't overwrite a cacheline completely we need to be
1194                  * careful to have up-to-date data by first clflushing. Don't
1195                  * overcomplicate things and flush the entire patch. */
1196                 partial_cacheline_write = needs_clflush_before &&
1197                         ((shmem_page_offset | page_length)
1198                                 & (boot_cpu_data.x86_clflush_size - 1));
1199
1200                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
1201                         (page_to_phys(page) & (1 << 17)) != 0;
1202
1203                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
1204                                         user_data, page_do_bit17_swizzling,
1205                                         partial_cacheline_write,
1206                                         needs_clflush_after);
1207                 if (ret == 0)
1208                         goto next_page;
1209
1210                 hit_slowpath = 1;
1211                 mutex_unlock(&dev->struct_mutex);
1212                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
1213                                         user_data, page_do_bit17_swizzling,
1214                                         partial_cacheline_write,
1215                                         needs_clflush_after);
1216
1217                 mutex_lock(&dev->struct_mutex);
1218
1219                 if (ret)
1220                         goto out;
1221
1222 next_page:
1223                 remain -= page_length;
1224                 user_data += page_length;
1225                 offset += page_length;
1226         }
1227
1228 out:
1229         i915_gem_object_unpin_pages(obj);
1230
1231         if (hit_slowpath) {
1232                 /*
1233                  * Fixup: Flush cpu caches in case we didn't flush the dirty
1234                  * cachelines in-line while writing and the object moved
1235                  * out of the cpu write domain while we've dropped the lock.
1236                  */
1237                 if (!needs_clflush_after &&
1238                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1239                         if (i915_gem_clflush_object(obj, obj->pin_display))
1240                                 needs_clflush_after = true;
1241                 }
1242         }
1243
1244         if (needs_clflush_after)
1245                 i915_gem_chipset_flush(to_i915(dev));
1246         else
1247                 obj->cache_dirty = true;
1248
1249         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1250         return ret;
1251 }
1252
1253 /**
1254  * Writes data to the object referenced by handle.
1255  * @dev: drm device
1256  * @data: ioctl data blob
1257  * @file: drm file
1258  *
1259  * On error, the contents of the buffer that were to be modified are undefined.
1260  */
1261 int
1262 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1263                       struct drm_file *file)
1264 {
1265         struct drm_i915_private *dev_priv = to_i915(dev);
1266         struct drm_i915_gem_pwrite *args = data;
1267         struct drm_i915_gem_object *obj;
1268         int ret;
1269
1270         if (args->size == 0)
1271                 return 0;
1272
1273         if (!access_ok(VERIFY_READ,
1274                        u64_to_user_ptr(args->data_ptr),
1275                        args->size))
1276                 return -EFAULT;
1277
1278         if (likely(!i915.prefault_disable)) {
1279                 ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
1280                                                    args->size);
1281                 if (ret)
1282                         return -EFAULT;
1283         }
1284
1285         intel_runtime_pm_get(dev_priv);
1286
1287         ret = i915_mutex_lock_interruptible(dev);
1288         if (ret)
1289                 goto put_rpm;
1290
1291         obj = i915_gem_object_lookup(file, args->handle);
1292         if (!obj) {
1293                 ret = -ENOENT;
1294                 goto unlock;
1295         }
1296
1297         /* Bounds check destination. */
1298         if (args->offset > obj->base.size ||
1299             args->size > obj->base.size - args->offset) {
1300                 ret = -EINVAL;
1301                 goto out;
1302         }
1303
1304         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1305
1306         ret = -EFAULT;
1307         /* We can only do the GTT pwrite on untiled buffers, as otherwise
1308          * it would end up going through the fenced access, and we'll get
1309          * different detiling behavior between reading and writing.
1310          * pread/pwrite currently are reading and writing from the CPU
1311          * perspective, requiring manual detiling by the client.
1312          */
1313         if (!i915_gem_object_has_struct_page(obj) ||
1314             cpu_write_needs_clflush(obj)) {
1315                 ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
1316                 /* Note that the gtt paths might fail with non-page-backed user
1317                  * pointers (e.g. gtt mappings when moving data between
1318                  * textures). Fallback to the shmem path in that case. */
1319         }
1320
1321         if (ret == -EFAULT || ret == -ENOSPC) {
1322                 if (obj->phys_handle)
1323                         ret = i915_gem_phys_pwrite(obj, args, file);
1324                 else if (i915_gem_object_has_struct_page(obj))
1325                         ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1326                 else
1327                         ret = -ENODEV;
1328         }
1329
1330 out:
1331         i915_gem_object_put(obj);
1332 unlock:
1333         mutex_unlock(&dev->struct_mutex);
1334 put_rpm:
1335         intel_runtime_pm_put(dev_priv);
1336
1337         return ret;
1338 }
1339
1340 /**
1341  * Ensures that all rendering to the object has completed and the object is
1342  * safe to unbind from the GTT or access from the CPU.
1343  * @obj: i915 gem object
1344  * @readonly: waiting for read access or write
1345  */
1346 int
1347 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1348                                bool readonly)
1349 {
1350         struct reservation_object *resv;
1351         struct i915_gem_active *active;
1352         unsigned long active_mask;
1353         int idx, ret;
1354
1355         lockdep_assert_held(&obj->base.dev->struct_mutex);
1356
1357         if (!readonly) {
1358                 active = obj->last_read;
1359                 active_mask = obj->active;
1360         } else {
1361                 active_mask = 1;
1362                 active = &obj->last_write;
1363         }
1364
1365         for_each_active(active_mask, idx) {
1366                 ret = i915_gem_active_wait(&active[idx],
1367                                            &obj->base.dev->struct_mutex);
1368                 if (ret)
1369                         return ret;
1370         }
1371
1372         resv = i915_gem_object_get_dmabuf_resv(obj);
1373         if (resv) {
1374                 long err;
1375
1376                 err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
1377                                                           MAX_SCHEDULE_TIMEOUT);
1378                 if (err < 0)
1379                         return err;
1380         }
1381
1382         return 0;
1383 }
1384
1385 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1386  * as the object state may change during this call.
1387  */
1388 static __must_check int
1389 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1390                                             struct intel_rps_client *rps,
1391                                             bool readonly)
1392 {
1393         struct drm_device *dev = obj->base.dev;
1394         struct drm_i915_private *dev_priv = to_i915(dev);
1395         struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
1396         struct i915_gem_active *active;
1397         unsigned long active_mask;
1398         int ret, i, n = 0;
1399
1400         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1401         BUG_ON(!dev_priv->mm.interruptible);
1402
1403         active_mask = obj->active;
1404         if (!active_mask)
1405                 return 0;
1406
1407         if (!readonly) {
1408                 active = obj->last_read;
1409         } else {
1410                 active_mask = 1;
1411                 active = &obj->last_write;
1412         }
1413
1414         for_each_active(active_mask, i) {
1415                 struct drm_i915_gem_request *req;
1416
1417                 req = i915_gem_active_get(&active[i],
1418                                           &obj->base.dev->struct_mutex);
1419                 if (req)
1420                         requests[n++] = req;
1421         }
1422
1423         mutex_unlock(&dev->struct_mutex);
1424         ret = 0;
1425         for (i = 0; ret == 0 && i < n; i++)
1426                 ret = i915_wait_request(requests[i], true, NULL, rps);
1427         mutex_lock(&dev->struct_mutex);
1428
1429         for (i = 0; i < n; i++)
1430                 i915_gem_request_put(requests[i]);
1431
1432         return ret;
1433 }
1434
1435 static struct intel_rps_client *to_rps_client(struct drm_file *file)
1436 {
1437         struct drm_i915_file_private *fpriv = file->driver_priv;
1438         return &fpriv->rps;
1439 }
1440
1441 static enum fb_op_origin
1442 write_origin(struct drm_i915_gem_object *obj, unsigned domain)
1443 {
1444         return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
1445                ORIGIN_GTT : ORIGIN_CPU;
1446 }
1447
1448 /**
1449  * Called when user space prepares to use an object with the CPU, either
1450  * through the mmap ioctl's mapping or a GTT mapping.
1451  * @dev: drm device
1452  * @data: ioctl data blob
1453  * @file: drm file
1454  */
1455 int
1456 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1457                           struct drm_file *file)
1458 {
1459         struct drm_i915_gem_set_domain *args = data;
1460         struct drm_i915_gem_object *obj;
1461         uint32_t read_domains = args->read_domains;
1462         uint32_t write_domain = args->write_domain;
1463         int ret;
1464
1465         /* Only handle setting domains to types used by the CPU. */
1466         if (write_domain & I915_GEM_GPU_DOMAINS)
1467                 return -EINVAL;
1468
1469         if (read_domains & I915_GEM_GPU_DOMAINS)
1470                 return -EINVAL;
1471
1472         /* Having something in the write domain implies it's in the read
1473          * domain, and only that read domain.  Enforce that in the request.
1474          */
1475         if (write_domain != 0 && read_domains != write_domain)
1476                 return -EINVAL;
1477
1478         ret = i915_mutex_lock_interruptible(dev);
1479         if (ret)
1480                 return ret;
1481
1482         obj = i915_gem_object_lookup(file, args->handle);
1483         if (!obj) {
1484                 ret = -ENOENT;
1485                 goto unlock;
1486         }
1487
1488         /* Try to flush the object off the GPU without holding the lock.
1489          * We will repeat the flush holding the lock in the normal manner
1490          * to catch cases where we are gazumped.
1491          */
1492         ret = i915_gem_object_wait_rendering__nonblocking(obj,
1493                                                           to_rps_client(file),
1494                                                           !write_domain);
1495         if (ret)
1496                 goto unref;
1497
1498         if (read_domains & I915_GEM_DOMAIN_GTT)
1499                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1500         else
1501                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1502
1503         if (write_domain != 0)
1504                 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
1505
1506 unref:
1507         i915_gem_object_put(obj);
1508 unlock:
1509         mutex_unlock(&dev->struct_mutex);
1510         return ret;
1511 }
1512
1513 /**
1514  * Called when user space has done writes to this buffer
1515  * @dev: drm device
1516  * @data: ioctl data blob
1517  * @file: drm file
1518  */
1519 int
1520 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1521                          struct drm_file *file)
1522 {
1523         struct drm_i915_gem_sw_finish *args = data;
1524         struct drm_i915_gem_object *obj;
1525         int ret = 0;
1526
1527         ret = i915_mutex_lock_interruptible(dev);
1528         if (ret)
1529                 return ret;
1530
1531         obj = i915_gem_object_lookup(file, args->handle);
1532         if (!obj) {
1533                 ret = -ENOENT;
1534                 goto unlock;
1535         }
1536
1537         /* Pinned buffers may be scanout, so flush the cache */
1538         if (obj->pin_display)
1539                 i915_gem_object_flush_cpu_write_domain(obj);
1540
1541         i915_gem_object_put(obj);
1542 unlock:
1543         mutex_unlock(&dev->struct_mutex);
1544         return ret;
1545 }
1546
1547 /**
1548  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1549  *                       it is mapped to.
1550  * @dev: drm device
1551  * @data: ioctl data blob
1552  * @file: drm file
1553  *
1554  * While the mapping holds a reference on the contents of the object, it doesn't
1555  * imply a ref on the object itself.
1556  *
1557  * IMPORTANT:
1558  *
1559  * DRM driver writers who look a this function as an example for how to do GEM
1560  * mmap support, please don't implement mmap support like here. The modern way
1561  * to implement DRM mmap support is with an mmap offset ioctl (like
1562  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1563  * That way debug tooling like valgrind will understand what's going on, hiding
1564  * the mmap call in a driver private ioctl will break that. The i915 driver only
1565  * does cpu mmaps this way because we didn't know better.
1566  */
1567 int
1568 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1569                     struct drm_file *file)
1570 {
1571         struct drm_i915_gem_mmap *args = data;
1572         struct drm_i915_gem_object *obj;
1573         unsigned long addr;
1574
1575         if (args->flags & ~(I915_MMAP_WC))
1576                 return -EINVAL;
1577
1578         if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1579                 return -ENODEV;
1580
1581         obj = i915_gem_object_lookup(file, args->handle);
1582         if (!obj)
1583                 return -ENOENT;
1584
1585         /* prime objects have no backing filp to GEM mmap
1586          * pages from.
1587          */
1588         if (!obj->base.filp) {
1589                 i915_gem_object_put_unlocked(obj);
1590                 return -EINVAL;
1591         }
1592
1593         addr = vm_mmap(obj->base.filp, 0, args->size,
1594                        PROT_READ | PROT_WRITE, MAP_SHARED,
1595                        args->offset);
1596         if (args->flags & I915_MMAP_WC) {
1597                 struct mm_struct *mm = current->mm;
1598                 struct vm_area_struct *vma;
1599
1600                 if (down_write_killable(&mm->mmap_sem)) {
1601                         i915_gem_object_put_unlocked(obj);
1602                         return -EINTR;
1603                 }
1604                 vma = find_vma(mm, addr);
1605                 if (vma)
1606                         vma->vm_page_prot =
1607                                 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1608                 else
1609                         addr = -ENOMEM;
1610                 up_write(&mm->mmap_sem);
1611
1612                 /* This may race, but that's ok, it only gets set */
1613                 WRITE_ONCE(obj->has_wc_mmap, true);
1614         }
1615         i915_gem_object_put_unlocked(obj);
1616         if (IS_ERR((void *)addr))
1617                 return addr;
1618
1619         args->addr_ptr = (uint64_t) addr;
1620
1621         return 0;
1622 }
1623
1624 /**
1625  * i915_gem_fault - fault a page into the GTT
1626  * @vma: VMA in question
1627  * @vmf: fault info
1628  *
1629  * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1630  * from userspace.  The fault handler takes care of binding the object to
1631  * the GTT (if needed), allocating and programming a fence register (again,
1632  * only if needed based on whether the old reg is still valid or the object
1633  * is tiled) and inserting a new PTE into the faulting process.
1634  *
1635  * Note that the faulting process may involve evicting existing objects
1636  * from the GTT and/or fence registers to make room.  So performance may
1637  * suffer if the GTT working set is large or there are few fence registers
1638  * left.
1639  */
1640 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1641 {
1642         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1643         struct drm_device *dev = obj->base.dev;
1644         struct drm_i915_private *dev_priv = to_i915(dev);
1645         struct i915_ggtt *ggtt = &dev_priv->ggtt;
1646         struct i915_ggtt_view view = i915_ggtt_view_normal;
1647         pgoff_t page_offset;
1648         unsigned long pfn;
1649         int ret = 0;
1650         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1651
1652         intel_runtime_pm_get(dev_priv);
1653
1654         /* We don't use vmf->pgoff since that has the fake offset */
1655         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1656                 PAGE_SHIFT;
1657
1658         ret = i915_mutex_lock_interruptible(dev);
1659         if (ret)
1660                 goto out;
1661
1662         trace_i915_gem_object_fault(obj, page_offset, true, write);
1663
1664         /* Try to flush the object off the GPU first without holding the lock.
1665          * Upon reacquiring the lock, we will perform our sanity checks and then
1666          * repeat the flush holding the lock in the normal manner to catch cases
1667          * where we are gazumped.
1668          */
1669         ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1670         if (ret)
1671                 goto unlock;
1672
1673         /* Access to snoopable pages through the GTT is incoherent. */
1674         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1675                 ret = -EFAULT;
1676                 goto unlock;
1677         }
1678
1679         /* Use a partial view if the object is bigger than the aperture. */
1680         if (obj->base.size >= ggtt->mappable_end &&
1681             obj->tiling_mode == I915_TILING_NONE) {
1682                 static const unsigned int chunk_size = 256; // 1 MiB
1683
1684                 memset(&view, 0, sizeof(view));
1685                 view.type = I915_GGTT_VIEW_PARTIAL;
1686                 view.params.partial.offset = rounddown(page_offset, chunk_size);
1687                 view.params.partial.size =
1688                         min_t(unsigned int,
1689                               chunk_size,
1690                               (vma->vm_end - vma->vm_start)/PAGE_SIZE -
1691                               view.params.partial.offset);
1692         }
1693
1694         /* Now pin it into the GTT if needed */
1695         ret = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1696         if (ret)
1697                 goto unlock;
1698
1699         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1700         if (ret)
1701                 goto unpin;
1702
1703         ret = i915_gem_object_get_fence(obj);
1704         if (ret)
1705                 goto unpin;
1706
1707         /* Finally, remap it using the new GTT offset */
1708         pfn = ggtt->mappable_base +
1709                 i915_gem_obj_ggtt_offset_view(obj, &view);
1710         pfn >>= PAGE_SHIFT;
1711
1712         if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
1713                 /* Overriding existing pages in partial view does not cause
1714                  * us any trouble as TLBs are still valid because the fault
1715                  * is due to userspace losing part of the mapping or never
1716                  * having accessed it before (at this partials' range).
1717                  */
1718                 unsigned long base = vma->vm_start +
1719                                      (view.params.partial.offset << PAGE_SHIFT);
1720                 unsigned int i;
1721
1722                 for (i = 0; i < view.params.partial.size; i++) {
1723                         ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
1724                         if (ret)
1725                                 break;
1726                 }
1727
1728                 obj->fault_mappable = true;
1729         } else {
1730                 if (!obj->fault_mappable) {
1731                         unsigned long size = min_t(unsigned long,
1732                                                    vma->vm_end - vma->vm_start,
1733                                                    obj->base.size);
1734                         int i;
1735
1736                         for (i = 0; i < size >> PAGE_SHIFT; i++) {
1737                                 ret = vm_insert_pfn(vma,
1738                                                     (unsigned long)vma->vm_start + i * PAGE_SIZE,
1739                                                     pfn + i);
1740                                 if (ret)
1741                                         break;
1742                         }
1743
1744                         obj->fault_mappable = true;
1745                 } else
1746                         ret = vm_insert_pfn(vma,
1747                                             (unsigned long)vmf->virtual_address,
1748                                             pfn + page_offset);
1749         }
1750 unpin:
1751         i915_gem_object_ggtt_unpin_view(obj, &view);
1752 unlock:
1753         mutex_unlock(&dev->struct_mutex);
1754 out:
1755         switch (ret) {
1756         case -EIO:
1757                 /*
1758                  * We eat errors when the gpu is terminally wedged to avoid
1759                  * userspace unduly crashing (gl has no provisions for mmaps to
1760                  * fail). But any other -EIO isn't ours (e.g. swap in failure)
1761                  * and so needs to be reported.
1762                  */
1763                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1764                         ret = VM_FAULT_SIGBUS;
1765                         break;
1766                 }
1767         case -EAGAIN:
1768                 /*
1769                  * EAGAIN means the gpu is hung and we'll wait for the error
1770                  * handler to reset everything when re-faulting in
1771                  * i915_mutex_lock_interruptible.
1772                  */
1773         case 0:
1774         case -ERESTARTSYS:
1775         case -EINTR:
1776         case -EBUSY:
1777                 /*
1778                  * EBUSY is ok: this just means that another thread
1779                  * already did the job.
1780                  */
1781                 ret = VM_FAULT_NOPAGE;
1782                 break;
1783         case -ENOMEM:
1784                 ret = VM_FAULT_OOM;
1785                 break;
1786         case -ENOSPC:
1787         case -EFAULT:
1788                 ret = VM_FAULT_SIGBUS;
1789                 break;
1790         default:
1791                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1792                 ret = VM_FAULT_SIGBUS;
1793                 break;
1794         }
1795
1796         intel_runtime_pm_put(dev_priv);
1797         return ret;
1798 }
1799
1800 /**
1801  * i915_gem_release_mmap - remove physical page mappings
1802  * @obj: obj in question
1803  *
1804  * Preserve the reservation of the mmapping with the DRM core code, but
1805  * relinquish ownership of the pages back to the system.
1806  *
1807  * It is vital that we remove the page mapping if we have mapped a tiled
1808  * object through the GTT and then lose the fence register due to
1809  * resource pressure. Similarly if the object has been moved out of the
1810  * aperture, than pages mapped into userspace must be revoked. Removing the
1811  * mapping will then trigger a page fault on the next user access, allowing
1812  * fixup by i915_gem_fault().
1813  */
1814 void
1815 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1816 {
1817         /* Serialisation between user GTT access and our code depends upon
1818          * revoking the CPU's PTE whilst the mutex is held. The next user
1819          * pagefault then has to wait until we release the mutex.
1820          */
1821         lockdep_assert_held(&obj->base.dev->struct_mutex);
1822
1823         if (!obj->fault_mappable)
1824                 return;
1825
1826         drm_vma_node_unmap(&obj->base.vma_node,
1827                            obj->base.dev->anon_inode->i_mapping);
1828
1829         /* Ensure that the CPU's PTE are revoked and there are not outstanding
1830          * memory transactions from userspace before we return. The TLB
1831          * flushing implied above by changing the PTE above *should* be
1832          * sufficient, an extra barrier here just provides us with a bit
1833          * of paranoid documentation about our requirement to serialise
1834          * memory writes before touching registers / GSM.
1835          */
1836         wmb();
1837
1838         obj->fault_mappable = false;
1839 }
1840
1841 void
1842 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1843 {
1844         struct drm_i915_gem_object *obj;
1845
1846         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1847                 i915_gem_release_mmap(obj);
1848 }
1849
1850 /**
1851  * i915_gem_get_ggtt_size - return required global GTT size for an object
1852  * @dev_priv: i915 device
1853  * @size: object size
1854  * @tiling_mode: tiling mode
1855  *
1856  * Return the required global GTT size for an object, taking into account
1857  * potential fence register mapping.
1858  */
1859 u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
1860                            u64 size, int tiling_mode)
1861 {
1862         u64 ggtt_size;
1863
1864         GEM_BUG_ON(size == 0);
1865
1866         if (INTEL_GEN(dev_priv) >= 4 ||
1867             tiling_mode == I915_TILING_NONE)
1868                 return size;
1869
1870         /* Previous chips need a power-of-two fence region when tiling */
1871         if (IS_GEN3(dev_priv))
1872                 ggtt_size = 1024*1024;
1873         else
1874                 ggtt_size = 512*1024;
1875
1876         while (ggtt_size < size)
1877                 ggtt_size <<= 1;
1878
1879         return ggtt_size;
1880 }
1881
1882 /**
1883  * i915_gem_get_ggtt_alignment - return required global GTT alignment
1884  * @dev_priv: i915 device
1885  * @size: object size
1886  * @tiling_mode: tiling mode
1887  * @fenced: is fenced alignment required or not
1888  *
1889  * Return the required global GTT alignment for an object, taking into account
1890  * potential fence register mapping.
1891  */
1892 u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
1893                                 int tiling_mode, bool fenced)
1894 {
1895         GEM_BUG_ON(size == 0);
1896
1897         /*
1898          * Minimum alignment is 4k (GTT page size), but might be greater
1899          * if a fence register is needed for the object.
1900          */
1901         if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
1902             tiling_mode == I915_TILING_NONE)
1903                 return 4096;
1904
1905         /*
1906          * Previous chips need to be aligned to the size of the smallest
1907          * fence register that can contain the object.
1908          */
1909         return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
1910 }
1911
1912 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1913 {
1914         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
1915         int ret;
1916
1917         dev_priv->mm.shrinker_no_lock_stealing = true;
1918
1919         ret = drm_gem_create_mmap_offset(&obj->base);
1920         if (ret != -ENOSPC)
1921                 goto out;
1922
1923         /* Badly fragmented mmap space? The only way we can recover
1924          * space is by destroying unwanted objects. We can't randomly release
1925          * mmap_offsets as userspace expects them to be persistent for the
1926          * lifetime of the objects. The closest we can is to release the
1927          * offsets on purgeable objects by truncating it and marking it purged,
1928          * which prevents userspace from ever using that object again.
1929          */
1930         i915_gem_shrink(dev_priv,
1931                         obj->base.size >> PAGE_SHIFT,
1932                         I915_SHRINK_BOUND |
1933                         I915_SHRINK_UNBOUND |
1934                         I915_SHRINK_PURGEABLE);
1935         ret = drm_gem_create_mmap_offset(&obj->base);
1936         if (ret != -ENOSPC)
1937                 goto out;
1938
1939         i915_gem_shrink_all(dev_priv);
1940         ret = drm_gem_create_mmap_offset(&obj->base);
1941 out:
1942         dev_priv->mm.shrinker_no_lock_stealing = false;
1943
1944         return ret;
1945 }
1946
1947 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1948 {
1949         drm_gem_free_mmap_offset(&obj->base);
1950 }
1951
1952 int
1953 i915_gem_mmap_gtt(struct drm_file *file,
1954                   struct drm_device *dev,
1955                   uint32_t handle,
1956                   uint64_t *offset)
1957 {
1958         struct drm_i915_gem_object *obj;
1959         int ret;
1960
1961         ret = i915_mutex_lock_interruptible(dev);
1962         if (ret)
1963                 return ret;
1964
1965         obj = i915_gem_object_lookup(file, handle);
1966         if (!obj) {
1967                 ret = -ENOENT;
1968                 goto unlock;
1969         }
1970
1971         if (obj->madv != I915_MADV_WILLNEED) {
1972                 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
1973                 ret = -EFAULT;
1974                 goto out;
1975         }
1976
1977         ret = i915_gem_object_create_mmap_offset(obj);
1978         if (ret)
1979                 goto out;
1980
1981         *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1982
1983 out:
1984         i915_gem_object_put(obj);
1985 unlock:
1986         mutex_unlock(&dev->struct_mutex);
1987         return ret;
1988 }
1989
1990 /**
1991  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1992  * @dev: DRM device
1993  * @data: GTT mapping ioctl data
1994  * @file: GEM object info
1995  *
1996  * Simply returns the fake offset to userspace so it can mmap it.
1997  * The mmap call will end up in drm_gem_mmap(), which will set things
1998  * up so we can get faults in the handler above.
1999  *
2000  * The fault handler will take care of binding the object into the GTT
2001  * (since it may have been evicted to make room for something), allocating
2002  * a fence register, and mapping the appropriate aperture address into
2003  * userspace.
2004  */
2005 int
2006 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2007                         struct drm_file *file)
2008 {
2009         struct drm_i915_gem_mmap_gtt *args = data;
2010
2011         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2012 }
2013
2014 /* Immediately discard the backing storage */
2015 static void
2016 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2017 {
2018         i915_gem_object_free_mmap_offset(obj);
2019
2020         if (obj->base.filp == NULL)
2021                 return;
2022
2023         /* Our goal here is to return as much of the memory as
2024          * is possible back to the system as we are called from OOM.
2025          * To do this we must instruct the shmfs to drop all of its
2026          * backing pages, *now*.
2027          */
2028         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2029         obj->madv = __I915_MADV_PURGED;
2030 }
2031
2032 /* Try to discard unwanted pages */
2033 static void
2034 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2035 {
2036         struct address_space *mapping;
2037
2038         switch (obj->madv) {
2039         case I915_MADV_DONTNEED:
2040                 i915_gem_object_truncate(obj);
2041         case __I915_MADV_PURGED:
2042                 return;
2043         }
2044
2045         if (obj->base.filp == NULL)
2046                 return;
2047
2048         mapping = file_inode(obj->base.filp)->i_mapping,
2049         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2050 }
2051
2052 static void
2053 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2054 {
2055         struct sgt_iter sgt_iter;
2056         struct page *page;
2057         int ret;
2058
2059         BUG_ON(obj->madv == __I915_MADV_PURGED);
2060
2061         ret = i915_gem_object_set_to_cpu_domain(obj, true);
2062         if (WARN_ON(ret)) {
2063                 /* In the event of a disaster, abandon all caches and
2064                  * hope for the best.
2065                  */
2066                 i915_gem_clflush_object(obj, true);
2067                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2068         }
2069
2070         i915_gem_gtt_finish_object(obj);
2071
2072         if (i915_gem_object_needs_bit17_swizzle(obj))
2073                 i915_gem_object_save_bit_17_swizzle(obj);
2074
2075         if (obj->madv == I915_MADV_DONTNEED)
2076                 obj->dirty = 0;
2077
2078         for_each_sgt_page(page, sgt_iter, obj->pages) {
2079                 if (obj->dirty)
2080                         set_page_dirty(page);
2081
2082                 if (obj->madv == I915_MADV_WILLNEED)
2083                         mark_page_accessed(page);
2084
2085                 put_page(page);
2086         }
2087         obj->dirty = 0;
2088
2089         sg_free_table(obj->pages);
2090         kfree(obj->pages);
2091 }
2092
2093 int
2094 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2095 {
2096         const struct drm_i915_gem_object_ops *ops = obj->ops;
2097
2098         if (obj->pages == NULL)
2099                 return 0;
2100
2101         if (obj->pages_pin_count)
2102                 return -EBUSY;
2103
2104         GEM_BUG_ON(obj->bind_count);
2105
2106         /* ->put_pages might need to allocate memory for the bit17 swizzle
2107          * array, hence protect them from being reaped by removing them from gtt
2108          * lists early. */
2109         list_del(&obj->global_list);
2110
2111         if (obj->mapping) {
2112                 if (is_vmalloc_addr(obj->mapping))
2113                         vunmap(obj->mapping);
2114                 else
2115                         kunmap(kmap_to_page(obj->mapping));
2116                 obj->mapping = NULL;
2117         }
2118
2119         ops->put_pages(obj);
2120         obj->pages = NULL;
2121
2122         i915_gem_object_invalidate(obj);
2123
2124         return 0;
2125 }
2126
2127 static int
2128 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2129 {
2130         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2131         int page_count, i;
2132         struct address_space *mapping;
2133         struct sg_table *st;
2134         struct scatterlist *sg;
2135         struct sgt_iter sgt_iter;
2136         struct page *page;
2137         unsigned long last_pfn = 0;     /* suppress gcc warning */
2138         int ret;
2139         gfp_t gfp;
2140
2141         /* Assert that the object is not currently in any GPU domain. As it
2142          * wasn't in the GTT, there shouldn't be any way it could have been in
2143          * a GPU cache
2144          */
2145         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2146         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2147
2148         st = kmalloc(sizeof(*st), GFP_KERNEL);
2149         if (st == NULL)
2150                 return -ENOMEM;
2151
2152         page_count = obj->base.size / PAGE_SIZE;
2153         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2154                 kfree(st);
2155                 return -ENOMEM;
2156         }
2157
2158         /* Get the list of pages out of our struct file.  They'll be pinned
2159          * at this point until we release them.
2160          *
2161          * Fail silently without starting the shrinker
2162          */
2163         mapping = file_inode(obj->base.filp)->i_mapping;
2164         gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2165         gfp |= __GFP_NORETRY | __GFP_NOWARN;
2166         sg = st->sgl;
2167         st->nents = 0;
2168         for (i = 0; i < page_count; i++) {
2169                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2170                 if (IS_ERR(page)) {
2171                         i915_gem_shrink(dev_priv,
2172                                         page_count,
2173                                         I915_SHRINK_BOUND |
2174                                         I915_SHRINK_UNBOUND |
2175                                         I915_SHRINK_PURGEABLE);
2176                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2177                 }
2178                 if (IS_ERR(page)) {
2179                         /* We've tried hard to allocate the memory by reaping
2180                          * our own buffer, now let the real VM do its job and
2181                          * go down in flames if truly OOM.
2182                          */
2183                         i915_gem_shrink_all(dev_priv);
2184                         page = shmem_read_mapping_page(mapping, i);
2185                         if (IS_ERR(page)) {
2186                                 ret = PTR_ERR(page);
2187                                 goto err_pages;
2188                         }
2189                 }
2190 #ifdef CONFIG_SWIOTLB
2191                 if (swiotlb_nr_tbl()) {
2192                         st->nents++;
2193                         sg_set_page(sg, page, PAGE_SIZE, 0);
2194                         sg = sg_next(sg);
2195                         continue;
2196                 }
2197 #endif
2198                 if (!i || page_to_pfn(page) != last_pfn + 1) {
2199                         if (i)
2200                                 sg = sg_next(sg);
2201                         st->nents++;
2202                         sg_set_page(sg, page, PAGE_SIZE, 0);
2203                 } else {
2204                         sg->length += PAGE_SIZE;
2205                 }
2206                 last_pfn = page_to_pfn(page);
2207
2208                 /* Check that the i965g/gm workaround works. */
2209                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2210         }
2211 #ifdef CONFIG_SWIOTLB
2212         if (!swiotlb_nr_tbl())
2213 #endif
2214                 sg_mark_end(sg);
2215         obj->pages = st;
2216
2217         ret = i915_gem_gtt_prepare_object(obj);
2218         if (ret)
2219                 goto err_pages;
2220
2221         if (i915_gem_object_needs_bit17_swizzle(obj))
2222                 i915_gem_object_do_bit_17_swizzle(obj);
2223
2224         if (obj->tiling_mode != I915_TILING_NONE &&
2225             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2226                 i915_gem_object_pin_pages(obj);
2227
2228         return 0;
2229
2230 err_pages:
2231         sg_mark_end(sg);
2232         for_each_sgt_page(page, sgt_iter, st)
2233                 put_page(page);
2234         sg_free_table(st);
2235         kfree(st);
2236
2237         /* shmemfs first checks if there is enough memory to allocate the page
2238          * and reports ENOSPC should there be insufficient, along with the usual
2239          * ENOMEM for a genuine allocation failure.
2240          *
2241          * We use ENOSPC in our driver to mean that we have run out of aperture
2242          * space and so want to translate the error from shmemfs back to our
2243          * usual understanding of ENOMEM.
2244          */
2245         if (ret == -ENOSPC)
2246                 ret = -ENOMEM;
2247
2248         return ret;
2249 }
2250
2251 /* Ensure that the associated pages are gathered from the backing storage
2252  * and pinned into our object. i915_gem_object_get_pages() may be called
2253  * multiple times before they are released by a single call to
2254  * i915_gem_object_put_pages() - once the pages are no longer referenced
2255  * either as a result of memory pressure (reaping pages under the shrinker)
2256  * or as the object is itself released.
2257  */
2258 int
2259 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2260 {
2261         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2262         const struct drm_i915_gem_object_ops *ops = obj->ops;
2263         int ret;
2264
2265         if (obj->pages)
2266                 return 0;
2267
2268         if (obj->madv != I915_MADV_WILLNEED) {
2269                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2270                 return -EFAULT;
2271         }
2272
2273         BUG_ON(obj->pages_pin_count);
2274
2275         ret = ops->get_pages(obj);
2276         if (ret)
2277                 return ret;
2278
2279         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2280
2281         obj->get_page.sg = obj->pages->sgl;
2282         obj->get_page.last = 0;
2283
2284         return 0;
2285 }
2286
2287 /* The 'mapping' part of i915_gem_object_pin_map() below */
2288 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
2289 {
2290         unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2291         struct sg_table *sgt = obj->pages;
2292         struct sgt_iter sgt_iter;
2293         struct page *page;
2294         struct page *stack_pages[32];
2295         struct page **pages = stack_pages;
2296         unsigned long i = 0;
2297         void *addr;
2298
2299         /* A single page can always be kmapped */
2300         if (n_pages == 1)
2301                 return kmap(sg_page(sgt->sgl));
2302
2303         if (n_pages > ARRAY_SIZE(stack_pages)) {
2304                 /* Too big for stack -- allocate temporary array instead */
2305                 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2306                 if (!pages)
2307                         return NULL;
2308         }
2309
2310         for_each_sgt_page(page, sgt_iter, sgt)
2311                 pages[i++] = page;
2312
2313         /* Check that we have the expected number of pages */
2314         GEM_BUG_ON(i != n_pages);
2315
2316         addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
2317
2318         if (pages != stack_pages)
2319                 drm_free_large(pages);
2320
2321         return addr;
2322 }
2323
2324 /* get, pin, and map the pages of the object into kernel space */
2325 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
2326 {
2327         int ret;
2328
2329         lockdep_assert_held(&obj->base.dev->struct_mutex);
2330
2331         ret = i915_gem_object_get_pages(obj);
2332         if (ret)
2333                 return ERR_PTR(ret);
2334
2335         i915_gem_object_pin_pages(obj);
2336
2337         if (!obj->mapping) {
2338                 obj->mapping = i915_gem_object_map(obj);
2339                 if (!obj->mapping) {
2340                         i915_gem_object_unpin_pages(obj);
2341                         return ERR_PTR(-ENOMEM);
2342                 }
2343         }
2344
2345         return obj->mapping;
2346 }
2347
2348 static void
2349 i915_gem_object_retire__write(struct i915_gem_active *active,
2350                               struct drm_i915_gem_request *request)
2351 {
2352         struct drm_i915_gem_object *obj =
2353                 container_of(active, struct drm_i915_gem_object, last_write);
2354
2355         intel_fb_obj_flush(obj, true, ORIGIN_CS);
2356 }
2357
2358 static void
2359 i915_gem_object_retire__read(struct i915_gem_active *active,
2360                              struct drm_i915_gem_request *request)
2361 {
2362         int idx = request->engine->id;
2363         struct drm_i915_gem_object *obj =
2364                 container_of(active, struct drm_i915_gem_object, last_read[idx]);
2365
2366         GEM_BUG_ON((obj->active & (1 << idx)) == 0);
2367
2368         obj->active &= ~(1 << idx);
2369         if (obj->active)
2370                 return;
2371
2372         /* Bump our place on the bound list to keep it roughly in LRU order
2373          * so that we don't steal from recently used but inactive objects
2374          * (unless we are forced to ofc!)
2375          */
2376         if (obj->bind_count)
2377                 list_move_tail(&obj->global_list,
2378                                &request->i915->mm.bound_list);
2379
2380         i915_gem_object_put(obj);
2381 }
2382
2383 static bool i915_context_is_banned(const struct i915_gem_context *ctx)
2384 {
2385         unsigned long elapsed;
2386
2387         if (ctx->hang_stats.banned)
2388                 return true;
2389
2390         elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2391         if (ctx->hang_stats.ban_period_seconds &&
2392             elapsed <= ctx->hang_stats.ban_period_seconds) {
2393                 DRM_DEBUG("context hanging too fast, banning!\n");
2394                 return true;
2395         }
2396
2397         return false;
2398 }
2399
2400 static void i915_set_reset_status(struct i915_gem_context *ctx,
2401                                   const bool guilty)
2402 {
2403         struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
2404
2405         if (guilty) {
2406                 hs->banned = i915_context_is_banned(ctx);
2407                 hs->batch_active++;
2408                 hs->guilty_ts = get_seconds();
2409         } else {
2410                 hs->batch_pending++;
2411         }
2412 }
2413
2414 struct drm_i915_gem_request *
2415 i915_gem_find_active_request(struct intel_engine_cs *engine)
2416 {
2417         struct drm_i915_gem_request *request;
2418
2419         /* We are called by the error capture and reset at a random
2420          * point in time. In particular, note that neither is crucially
2421          * ordered with an interrupt. After a hang, the GPU is dead and we
2422          * assume that no more writes can happen (we waited long enough for
2423          * all writes that were in transaction to be flushed) - adding an
2424          * extra delay for a recent interrupt is pointless. Hence, we do
2425          * not need an engine->irq_seqno_barrier() before the seqno reads.
2426          */
2427         list_for_each_entry(request, &engine->request_list, link) {
2428                 if (i915_gem_request_completed(request))
2429                         continue;
2430
2431                 return request;
2432         }
2433
2434         return NULL;
2435 }
2436
2437 static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
2438 {
2439         struct drm_i915_gem_request *request;
2440         bool ring_hung;
2441
2442         request = i915_gem_find_active_request(engine);
2443         if (request == NULL)
2444                 return;
2445
2446         ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2447
2448         i915_set_reset_status(request->ctx, ring_hung);
2449         list_for_each_entry_continue(request, &engine->request_list, link)
2450                 i915_set_reset_status(request->ctx, false);
2451 }
2452
2453 static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
2454 {
2455         struct intel_ring *ring;
2456
2457         /* Mark all pending requests as complete so that any concurrent
2458          * (lockless) lookup doesn't try and wait upon the request as we
2459          * reset it.
2460          */
2461         intel_engine_init_seqno(engine, engine->last_submitted_seqno);
2462
2463         /*
2464          * Clear the execlists queue up before freeing the requests, as those
2465          * are the ones that keep the context and ringbuffer backing objects
2466          * pinned in place.
2467          */
2468
2469         if (i915.enable_execlists) {
2470                 /* Ensure irq handler finishes or is cancelled. */
2471                 tasklet_kill(&engine->irq_tasklet);
2472
2473                 intel_execlists_cancel_requests(engine);
2474         }
2475
2476         /*
2477          * We must free the requests after all the corresponding objects have
2478          * been moved off active lists. Which is the same order as the normal
2479          * retire_requests function does. This is important if object hold
2480          * implicit references on things like e.g. ppgtt address spaces through
2481          * the request.
2482          */
2483         if (!list_empty(&engine->request_list)) {
2484                 struct drm_i915_gem_request *request;
2485
2486                 request = list_last_entry(&engine->request_list,
2487                                           struct drm_i915_gem_request,
2488                                           link);
2489
2490                 i915_gem_request_retire_upto(request);
2491         }
2492
2493         /* Having flushed all requests from all queues, we know that all
2494          * ringbuffers must now be empty. However, since we do not reclaim
2495          * all space when retiring the request (to prevent HEADs colliding
2496          * with rapid ringbuffer wraparound) the amount of available space
2497          * upon reset is less than when we start. Do one more pass over
2498          * all the ringbuffers to reset last_retired_head.
2499          */
2500         list_for_each_entry(ring, &engine->buffers, link) {
2501                 ring->last_retired_head = ring->tail;
2502                 intel_ring_update_space(ring);
2503         }
2504
2505         engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
2506 }
2507
2508 void i915_gem_reset(struct drm_device *dev)
2509 {
2510         struct drm_i915_private *dev_priv = to_i915(dev);
2511         struct intel_engine_cs *engine;
2512
2513         /*
2514          * Before we free the objects from the requests, we need to inspect
2515          * them for finding the guilty party. As the requests only borrow
2516          * their reference to the objects, the inspection must be done first.
2517          */
2518         for_each_engine(engine, dev_priv)
2519                 i915_gem_reset_engine_status(engine);
2520
2521         for_each_engine(engine, dev_priv)
2522                 i915_gem_reset_engine_cleanup(engine);
2523         mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
2524
2525         i915_gem_context_reset(dev);
2526
2527         i915_gem_restore_fences(dev);
2528 }
2529
2530 static void
2531 i915_gem_retire_work_handler(struct work_struct *work)
2532 {
2533         struct drm_i915_private *dev_priv =
2534                 container_of(work, typeof(*dev_priv), gt.retire_work.work);
2535         struct drm_device *dev = &dev_priv->drm;
2536
2537         /* Come back later if the device is busy... */
2538         if (mutex_trylock(&dev->struct_mutex)) {
2539                 i915_gem_retire_requests(dev_priv);
2540                 mutex_unlock(&dev->struct_mutex);
2541         }
2542
2543         /* Keep the retire handler running until we are finally idle.
2544          * We do not need to do this test under locking as in the worst-case
2545          * we queue the retire worker once too often.
2546          */
2547         if (READ_ONCE(dev_priv->gt.awake)) {
2548                 i915_queue_hangcheck(dev_priv);
2549                 queue_delayed_work(dev_priv->wq,
2550                                    &dev_priv->gt.retire_work,
2551                                    round_jiffies_up_relative(HZ));
2552         }
2553 }
2554
2555 static void
2556 i915_gem_idle_work_handler(struct work_struct *work)
2557 {
2558         struct drm_i915_private *dev_priv =
2559                 container_of(work, typeof(*dev_priv), gt.idle_work.work);
2560         struct drm_device *dev = &dev_priv->drm;
2561         struct intel_engine_cs *engine;
2562         unsigned int stuck_engines;
2563         bool rearm_hangcheck;
2564
2565         if (!READ_ONCE(dev_priv->gt.awake))
2566                 return;
2567
2568         if (READ_ONCE(dev_priv->gt.active_engines))
2569                 return;
2570
2571         rearm_hangcheck =
2572                 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
2573
2574         if (!mutex_trylock(&dev->struct_mutex)) {
2575                 /* Currently busy, come back later */
2576                 mod_delayed_work(dev_priv->wq,
2577                                  &dev_priv->gt.idle_work,
2578                                  msecs_to_jiffies(50));
2579                 goto out_rearm;
2580         }
2581
2582         if (dev_priv->gt.active_engines)
2583                 goto out_unlock;
2584
2585         for_each_engine(engine, dev_priv)
2586                 i915_gem_batch_pool_fini(&engine->batch_pool);
2587
2588         GEM_BUG_ON(!dev_priv->gt.awake);
2589         dev_priv->gt.awake = false;
2590         rearm_hangcheck = false;
2591
2592         /* As we have disabled hangcheck, we need to unstick any waiters still
2593          * hanging around. However, as we may be racing against the interrupt
2594          * handler or the waiters themselves, we skip enabling the fake-irq.
2595          */
2596         stuck_engines = intel_kick_waiters(dev_priv);
2597         if (unlikely(stuck_engines))
2598                 DRM_DEBUG_DRIVER("kicked stuck waiters (%x)...missed irq?\n",
2599                                  stuck_engines);
2600
2601         if (INTEL_GEN(dev_priv) >= 6)
2602                 gen6_rps_idle(dev_priv);
2603         intel_runtime_pm_put(dev_priv);
2604 out_unlock:
2605         mutex_unlock(&dev->struct_mutex);
2606
2607 out_rearm:
2608         if (rearm_hangcheck) {
2609                 GEM_BUG_ON(!dev_priv->gt.awake);
2610                 i915_queue_hangcheck(dev_priv);
2611         }
2612 }
2613
2614 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
2615 {
2616         struct drm_i915_gem_object *obj = to_intel_bo(gem);
2617         struct drm_i915_file_private *fpriv = file->driver_priv;
2618         struct i915_vma *vma, *vn;
2619
2620         mutex_lock(&obj->base.dev->struct_mutex);
2621         list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
2622                 if (vma->vm->file == fpriv)
2623                         i915_vma_close(vma);
2624         mutex_unlock(&obj->base.dev->struct_mutex);
2625 }
2626
2627 /**
2628  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2629  * @dev: drm device pointer
2630  * @data: ioctl data blob
2631  * @file: drm file pointer
2632  *
2633  * Returns 0 if successful, else an error is returned with the remaining time in
2634  * the timeout parameter.
2635  *  -ETIME: object is still busy after timeout
2636  *  -ERESTARTSYS: signal interrupted the wait
2637  *  -ENONENT: object doesn't exist
2638  * Also possible, but rare:
2639  *  -EAGAIN: GPU wedged
2640  *  -ENOMEM: damn
2641  *  -ENODEV: Internal IRQ fail
2642  *  -E?: The add request failed
2643  *
2644  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2645  * non-zero timeout parameter the wait ioctl will wait for the given number of
2646  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2647  * without holding struct_mutex the object may become re-busied before this
2648  * function completes. A similar but shorter * race condition exists in the busy
2649  * ioctl
2650  */
2651 int
2652 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2653 {
2654         struct drm_i915_gem_wait *args = data;
2655         struct drm_i915_gem_object *obj;
2656         struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
2657         int i, n = 0;
2658         int ret;
2659
2660         if (args->flags != 0)
2661                 return -EINVAL;
2662
2663         ret = i915_mutex_lock_interruptible(dev);
2664         if (ret)
2665                 return ret;
2666
2667         obj = i915_gem_object_lookup(file, args->bo_handle);
2668         if (!obj) {
2669                 mutex_unlock(&dev->struct_mutex);
2670                 return -ENOENT;
2671         }
2672
2673         if (!obj->active)
2674                 goto out;
2675
2676         for (i = 0; i < I915_NUM_ENGINES; i++) {
2677                 struct drm_i915_gem_request *req;
2678
2679                 req = i915_gem_active_get(&obj->last_read[i],
2680                                           &obj->base.dev->struct_mutex);
2681                 if (req)
2682                         requests[n++] = req;
2683         }
2684
2685 out:
2686         i915_gem_object_put(obj);
2687         mutex_unlock(&dev->struct_mutex);
2688
2689         for (i = 0; i < n; i++) {
2690                 if (ret == 0)
2691                         ret = i915_wait_request(requests[i], true,
2692                                                 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
2693                                                 to_rps_client(file));
2694                 i915_gem_request_put(requests[i]);
2695         }
2696         return ret;
2697 }
2698
2699 static int
2700 __i915_gem_object_sync(struct drm_i915_gem_request *to,
2701                        struct drm_i915_gem_request *from)
2702 {
2703         int ret;
2704
2705         if (to->engine == from->engine)
2706                 return 0;
2707
2708         if (!i915.semaphores) {
2709                 ret = i915_wait_request(from,
2710                                         from->i915->mm.interruptible,
2711                                         NULL,
2712                                         NO_WAITBOOST);
2713                 if (ret)
2714                         return ret;
2715         } else {
2716                 int idx = intel_engine_sync_index(from->engine, to->engine);
2717                 if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
2718                         return 0;
2719
2720                 trace_i915_gem_ring_sync_to(to, from);
2721                 ret = to->engine->semaphore.sync_to(to, from);
2722                 if (ret)
2723                         return ret;
2724
2725                 from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
2726         }
2727
2728         return 0;
2729 }
2730
2731 /**
2732  * i915_gem_object_sync - sync an object to a ring.
2733  *
2734  * @obj: object which may be in use on another ring.
2735  * @to: request we are wishing to use
2736  *
2737  * This code is meant to abstract object synchronization with the GPU.
2738  * Conceptually we serialise writes between engines inside the GPU.
2739  * We only allow one engine to write into a buffer at any time, but
2740  * multiple readers. To ensure each has a coherent view of memory, we must:
2741  *
2742  * - If there is an outstanding write request to the object, the new
2743  *   request must wait for it to complete (either CPU or in hw, requests
2744  *   on the same ring will be naturally ordered).
2745  *
2746  * - If we are a write request (pending_write_domain is set), the new
2747  *   request must wait for outstanding read requests to complete.
2748  *
2749  * Returns 0 if successful, else propagates up the lower layer error.
2750  */
2751 int
2752 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2753                      struct drm_i915_gem_request *to)
2754 {
2755         struct i915_gem_active *active;
2756         unsigned long active_mask;
2757         int idx;
2758
2759         lockdep_assert_held(&obj->base.dev->struct_mutex);
2760
2761         active_mask = obj->active;
2762         if (!active_mask)
2763                 return 0;
2764
2765         if (obj->base.pending_write_domain) {
2766                 active = obj->last_read;
2767         } else {
2768                 active_mask = 1;
2769                 active = &obj->last_write;
2770         }
2771
2772         for_each_active(active_mask, idx) {
2773                 struct drm_i915_gem_request *request;
2774                 int ret;
2775
2776                 request = i915_gem_active_peek(&active[idx],
2777                                                &obj->base.dev->struct_mutex);
2778                 if (!request)
2779                         continue;
2780
2781                 ret = __i915_gem_object_sync(to, request);
2782                 if (ret)
2783                         return ret;
2784         }
2785
2786         return 0;
2787 }
2788
2789 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2790 {
2791         u32 old_write_domain, old_read_domains;
2792
2793         /* Force a pagefault for domain tracking on next user access */
2794         i915_gem_release_mmap(obj);
2795
2796         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2797                 return;
2798
2799         old_read_domains = obj->base.read_domains;
2800         old_write_domain = obj->base.write_domain;
2801
2802         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2803         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2804
2805         trace_i915_gem_object_change_domain(obj,
2806                                             old_read_domains,
2807                                             old_write_domain);
2808 }
2809
2810 static void __i915_vma_iounmap(struct i915_vma *vma)
2811 {
2812         GEM_BUG_ON(vma->pin_count);
2813
2814         if (vma->iomap == NULL)
2815                 return;
2816
2817         io_mapping_unmap(vma->iomap);
2818         vma->iomap = NULL;
2819 }
2820
2821 int i915_vma_unbind(struct i915_vma *vma)
2822 {
2823         struct drm_i915_gem_object *obj = vma->obj;
2824         unsigned long active;
2825         int ret;
2826
2827         /* First wait upon any activity as retiring the request may
2828          * have side-effects such as unpinning or even unbinding this vma.
2829          */
2830         active = i915_vma_get_active(vma);
2831         if (active) {
2832                 int idx;
2833
2834                 /* When a closed VMA is retired, it is unbound - eek.
2835                  * In order to prevent it from being recursively closed,
2836                  * take a pin on the vma so that the second unbind is
2837                  * aborted.
2838                  */
2839                 vma->pin_count++;
2840
2841                 for_each_active(active, idx) {
2842                         ret = i915_gem_active_retire(&vma->last_read[idx],
2843                                                    &vma->vm->dev->struct_mutex);
2844                         if (ret)
2845                                 break;
2846                 }
2847
2848                 vma->pin_count--;
2849                 if (ret)
2850                         return ret;
2851
2852                 GEM_BUG_ON(i915_vma_is_active(vma));
2853         }
2854
2855         if (vma->pin_count)
2856                 return -EBUSY;
2857
2858         if (!drm_mm_node_allocated(&vma->node))
2859                 goto destroy;
2860
2861         GEM_BUG_ON(obj->bind_count == 0);
2862         GEM_BUG_ON(!obj->pages);
2863
2864         if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
2865                 i915_gem_object_finish_gtt(obj);
2866
2867                 /* release the fence reg _after_ flushing */
2868                 ret = i915_gem_object_put_fence(obj);
2869                 if (ret)
2870                         return ret;
2871
2872                 __i915_vma_iounmap(vma);
2873         }
2874
2875         if (likely(!vma->vm->closed)) {
2876                 trace_i915_vma_unbind(vma);
2877                 vma->vm->unbind_vma(vma);
2878         }
2879         vma->bound = 0;
2880
2881         drm_mm_remove_node(&vma->node);
2882         list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
2883
2884         if (vma->is_ggtt) {
2885                 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
2886                         obj->map_and_fenceable = false;
2887                 } else if (vma->ggtt_view.pages) {
2888                         sg_free_table(vma->ggtt_view.pages);
2889                         kfree(vma->ggtt_view.pages);
2890                 }
2891                 vma->ggtt_view.pages = NULL;
2892         }
2893
2894         /* Since the unbound list is global, only move to that list if
2895          * no more VMAs exist. */
2896         if (--obj->bind_count == 0)
2897                 list_move_tail(&obj->global_list,
2898                                &to_i915(obj->base.dev)->mm.unbound_list);
2899
2900         /* And finally now the object is completely decoupled from this vma,
2901          * we can drop its hold on the backing storage and allow it to be
2902          * reaped by the shrinker.
2903          */
2904         i915_gem_object_unpin_pages(obj);
2905
2906 destroy:
2907         if (unlikely(vma->closed))
2908                 i915_vma_destroy(vma);
2909
2910         return 0;
2911 }
2912
2913 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
2914 {
2915         struct intel_engine_cs *engine;
2916         int ret;
2917
2918         lockdep_assert_held(&dev_priv->drm.struct_mutex);
2919
2920         for_each_engine(engine, dev_priv) {
2921                 if (engine->last_context == NULL)
2922                         continue;
2923
2924                 ret = intel_engine_idle(engine);
2925                 if (ret)
2926                         return ret;
2927         }
2928
2929         return 0;
2930 }
2931
2932 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
2933                                      unsigned long cache_level)
2934 {
2935         struct drm_mm_node *gtt_space = &vma->node;
2936         struct drm_mm_node *other;
2937
2938         /*
2939          * On some machines we have to be careful when putting differing types
2940          * of snoopable memory together to avoid the prefetcher crossing memory
2941          * domains and dying. During vm initialisation, we decide whether or not
2942          * these constraints apply and set the drm_mm.color_adjust
2943          * appropriately.
2944          */
2945         if (vma->vm->mm.color_adjust == NULL)
2946                 return true;
2947
2948         if (!drm_mm_node_allocated(gtt_space))
2949                 return true;
2950
2951         if (list_empty(&gtt_space->node_list))
2952                 return true;
2953
2954         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2955         if (other->allocated && !other->hole_follows && other->color != cache_level)
2956                 return false;
2957
2958         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2959         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2960                 return false;
2961
2962         return true;
2963 }
2964
2965 /**
2966  * Finds free space in the GTT aperture and binds the object or a view of it
2967  * there.
2968  * @obj: object to bind
2969  * @vm: address space to bind into
2970  * @ggtt_view: global gtt view if applicable
2971  * @size: requested size in bytes (can be larger than the VMA)
2972  * @alignment: requested alignment
2973  * @flags: mask of PIN_* flags to use
2974  */
2975 static struct i915_vma *
2976 i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
2977                                struct i915_address_space *vm,
2978                                const struct i915_ggtt_view *ggtt_view,
2979                                u64 size,
2980                                u64 alignment,
2981                                u64 flags)
2982 {
2983         struct drm_device *dev = obj->base.dev;
2984         struct drm_i915_private *dev_priv = to_i915(dev);
2985         u64 start, end;
2986         u32 search_flag, alloc_flag;
2987         struct i915_vma *vma;
2988         int ret;
2989
2990         if (i915_is_ggtt(vm)) {
2991                 u32 fence_size, fence_alignment, unfenced_alignment;
2992                 u64 view_size;
2993
2994                 if (WARN_ON(!ggtt_view))
2995                         return ERR_PTR(-EINVAL);
2996
2997                 view_size = i915_ggtt_view_size(obj, ggtt_view);
2998
2999                 fence_size = i915_gem_get_ggtt_size(dev_priv,
3000                                                     view_size,
3001                                                     obj->tiling_mode);
3002                 fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
3003                                                               view_size,
3004                                                               obj->tiling_mode,
3005                                                               true);
3006                 unfenced_alignment = i915_gem_get_ggtt_alignment(dev_priv,
3007                                                                  view_size,
3008                                                                  obj->tiling_mode,
3009                                                                  false);
3010                 size = max(size, view_size);
3011                 if (flags & PIN_MAPPABLE)
3012                         size = max_t(u64, size, fence_size);
3013
3014                 if (alignment == 0)
3015                         alignment = flags & PIN_MAPPABLE ? fence_alignment :
3016                                 unfenced_alignment;
3017                 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3018                         DRM_DEBUG("Invalid object (view type=%u) alignment requested %llx\n",
3019                                   ggtt_view ? ggtt_view->type : 0,
3020                                   alignment);
3021                         return ERR_PTR(-EINVAL);
3022                 }
3023         } else {
3024                 size = max_t(u64, size, obj->base.size);
3025                 alignment = 4096;
3026         }
3027
3028         start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3029         end = vm->total;
3030         if (flags & PIN_MAPPABLE)
3031                 end = min_t(u64, end, dev_priv->ggtt.mappable_end);
3032         if (flags & PIN_ZONE_4G)
3033                 end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
3034
3035         /* If binding the object/GGTT view requires more space than the entire
3036          * aperture has, reject it early before evicting everything in a vain
3037          * attempt to find space.
3038          */
3039         if (size > end) {
3040                 DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
3041                           ggtt_view ? ggtt_view->type : 0,
3042                           size, obj->base.size,
3043                           flags & PIN_MAPPABLE ? "mappable" : "total",
3044                           end);
3045                 return ERR_PTR(-E2BIG);
3046         }
3047
3048         ret = i915_gem_object_get_pages(obj);
3049         if (ret)
3050                 return ERR_PTR(ret);
3051
3052         i915_gem_object_pin_pages(obj);
3053
3054         vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
3055                           i915_gem_obj_lookup_or_create_vma(obj, vm);
3056
3057         if (IS_ERR(vma))
3058                 goto err_unpin;
3059
3060         if (flags & PIN_OFFSET_FIXED) {
3061                 uint64_t offset = flags & PIN_OFFSET_MASK;
3062
3063                 if (offset & (alignment - 1) || offset + size > end) {
3064                         ret = -EINVAL;
3065                         goto err_vma;
3066                 }
3067                 vma->node.start = offset;
3068                 vma->node.size = size;
3069                 vma->node.color = obj->cache_level;
3070                 ret = drm_mm_reserve_node(&vm->mm, &vma->node);
3071                 if (ret) {
3072                         ret = i915_gem_evict_for_vma(vma);
3073                         if (ret == 0)
3074                                 ret = drm_mm_reserve_node(&vm->mm, &vma->node);
3075                 }
3076                 if (ret)
3077                         goto err_vma;
3078         } else {
3079                 if (flags & PIN_HIGH) {
3080                         search_flag = DRM_MM_SEARCH_BELOW;
3081                         alloc_flag = DRM_MM_CREATE_TOP;
3082                 } else {
3083                         search_flag = DRM_MM_SEARCH_DEFAULT;
3084                         alloc_flag = DRM_MM_CREATE_DEFAULT;
3085                 }
3086
3087                 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3088                  * so we know that we always have a minimum alignment of 4096.
3089                  * The drm_mm range manager is optimised to return results
3090                  * with zero alignment, so where possible use the optimal
3091                  * path.
3092                  */
3093                 if (alignment <= 4096)
3094                         alignment = 0;
3095
3096 search_free:
3097                 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3098                                                           size, alignment,
3099                                                           obj->cache_level,
3100                                                           start, end,
3101                                                           search_flag,
3102                                                           alloc_flag);
3103                 if (ret) {
3104                         ret = i915_gem_evict_something(vm, size, alignment,
3105                                                        obj->cache_level,
3106                                                        start, end,
3107                                                        flags);
3108                         if (ret == 0)
3109                                 goto search_free;
3110
3111                         goto err_vma;
3112                 }
3113         }
3114         GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
3115
3116         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3117         list_move_tail(&vma->vm_link, &vm->inactive_list);
3118         obj->bind_count++;
3119
3120         return vma;
3121
3122 err_vma:
3123         vma = ERR_PTR(ret);
3124 err_unpin:
3125         i915_gem_object_unpin_pages(obj);
3126         return vma;
3127 }
3128
3129 bool
3130 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3131                         bool force)
3132 {
3133         /* If we don't have a page list set up, then we're not pinned
3134          * to GPU, and we can ignore the cache flush because it'll happen
3135          * again at bind time.
3136          */
3137         if (obj->pages == NULL)
3138                 return false;
3139
3140         /*
3141          * Stolen memory is always coherent with the GPU as it is explicitly
3142          * marked as wc by the system, or the system is cache-coherent.
3143          */
3144         if (obj->stolen || obj->phys_handle)
3145                 return false;
3146
3147         /* If the GPU is snooping the contents of the CPU cache,
3148          * we do not need to manually clear the CPU cache lines.  However,
3149          * the caches are only snooped when the render cache is
3150          * flushed/invalidated.  As we always have to emit invalidations
3151          * and flushes when moving into and out of the RENDER domain, correct
3152          * snooping behaviour occurs naturally as the result of our domain
3153          * tracking.
3154          */
3155         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3156                 obj->cache_dirty = true;
3157                 return false;
3158         }
3159
3160         trace_i915_gem_object_clflush(obj);
3161         drm_clflush_sg(obj->pages);
3162         obj->cache_dirty = false;
3163
3164         return true;
3165 }
3166
3167 /** Flushes the GTT write domain for the object if it's dirty. */
3168 static void
3169 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3170 {
3171         uint32_t old_write_domain;
3172
3173         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3174                 return;
3175
3176         /* No actual flushing is required for the GTT write domain.  Writes
3177          * to it immediately go to main memory as far as we know, so there's
3178          * no chipset flush.  It also doesn't land in render cache.
3179          *
3180          * However, we do have to enforce the order so that all writes through
3181          * the GTT land before any writes to the device, such as updates to
3182          * the GATT itself.
3183          */
3184         wmb();
3185
3186         old_write_domain = obj->base.write_domain;
3187         obj->base.write_domain = 0;
3188
3189         intel_fb_obj_flush(obj, false, ORIGIN_GTT);
3190
3191         trace_i915_gem_object_change_domain(obj,
3192                                             obj->base.read_domains,
3193                                             old_write_domain);
3194 }
3195
3196 /** Flushes the CPU write domain for the object if it's dirty. */
3197 static void
3198 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3199 {
3200         uint32_t old_write_domain;
3201
3202         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3203                 return;
3204
3205         if (i915_gem_clflush_object(obj, obj->pin_display))
3206                 i915_gem_chipset_flush(to_i915(obj->base.dev));
3207
3208         old_write_domain = obj->base.write_domain;
3209         obj->base.write_domain = 0;
3210
3211         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3212
3213         trace_i915_gem_object_change_domain(obj,
3214                                             obj->base.read_domains,
3215                                             old_write_domain);
3216 }
3217
3218 /**
3219  * Moves a single object to the GTT read, and possibly write domain.
3220  * @obj: object to act on
3221  * @write: ask for write access or read only
3222  *
3223  * This function returns when the move is complete, including waiting on
3224  * flushes to occur.
3225  */
3226 int
3227 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3228 {
3229         uint32_t old_write_domain, old_read_domains;
3230         struct i915_vma *vma;
3231         int ret;
3232
3233         ret = i915_gem_object_wait_rendering(obj, !write);
3234         if (ret)
3235                 return ret;
3236
3237         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3238                 return 0;
3239
3240         /* Flush and acquire obj->pages so that we are coherent through
3241          * direct access in memory with previous cached writes through
3242          * shmemfs and that our cache domain tracking remains valid.
3243          * For example, if the obj->filp was moved to swap without us
3244          * being notified and releasing the pages, we would mistakenly
3245          * continue to assume that the obj remained out of the CPU cached
3246          * domain.
3247          */
3248         ret = i915_gem_object_get_pages(obj);
3249         if (ret)
3250                 return ret;
3251
3252         i915_gem_object_flush_cpu_write_domain(obj);
3253
3254         /* Serialise direct access to this object with the barriers for
3255          * coherent writes from the GPU, by effectively invalidating the
3256          * GTT domain upon first access.
3257          */
3258         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3259                 mb();
3260
3261         old_write_domain = obj->base.write_domain;
3262         old_read_domains = obj->base.read_domains;
3263
3264         /* It should now be out of any other write domains, and we can update
3265          * the domain values for our changes.
3266          */
3267         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3268         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3269         if (write) {
3270                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3271                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3272                 obj->dirty = 1;
3273         }
3274
3275         trace_i915_gem_object_change_domain(obj,
3276                                             old_read_domains,
3277                                             old_write_domain);
3278
3279         /* And bump the LRU for this access */
3280         vma = i915_gem_obj_to_ggtt(obj);
3281         if (vma &&
3282             drm_mm_node_allocated(&vma->node) &&
3283             !i915_vma_is_active(vma))
3284                 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3285
3286         return 0;
3287 }
3288
3289 /**
3290  * Changes the cache-level of an object across all VMA.
3291  * @obj: object to act on
3292  * @cache_level: new cache level to set for the object
3293  *
3294  * After this function returns, the object will be in the new cache-level
3295  * across all GTT and the contents of the backing storage will be coherent,
3296  * with respect to the new cache-level. In order to keep the backing storage
3297  * coherent for all users, we only allow a single cache level to be set
3298  * globally on the object and prevent it from being changed whilst the
3299  * hardware is reading from the object. That is if the object is currently
3300  * on the scanout it will be set to uncached (or equivalent display
3301  * cache coherency) and all non-MOCS GPU access will also be uncached so
3302  * that all direct access to the scanout remains coherent.
3303  */
3304 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3305                                     enum i915_cache_level cache_level)
3306 {
3307         struct i915_vma *vma;
3308         int ret = 0;
3309
3310         if (obj->cache_level == cache_level)
3311                 goto out;
3312
3313         /* Inspect the list of currently bound VMA and unbind any that would
3314          * be invalid given the new cache-level. This is principally to
3315          * catch the issue of the CS prefetch crossing page boundaries and
3316          * reading an invalid PTE on older architectures.
3317          */
3318 restart:
3319         list_for_each_entry(vma, &obj->vma_list, obj_link) {
3320                 if (!drm_mm_node_allocated(&vma->node))
3321                         continue;
3322
3323                 if (vma->pin_count) {
3324                         DRM_DEBUG("can not change the cache level of pinned objects\n");
3325                         return -EBUSY;
3326                 }
3327
3328                 if (i915_gem_valid_gtt_space(vma, cache_level))
3329                         continue;
3330
3331                 ret = i915_vma_unbind(vma);
3332                 if (ret)
3333                         return ret;
3334
3335                 /* As unbinding may affect other elements in the
3336                  * obj->vma_list (due to side-effects from retiring
3337                  * an active vma), play safe and restart the iterator.
3338                  */
3339                 goto restart;
3340         }
3341
3342         /* We can reuse the existing drm_mm nodes but need to change the
3343          * cache-level on the PTE. We could simply unbind them all and
3344          * rebind with the correct cache-level on next use. However since
3345          * we already have a valid slot, dma mapping, pages etc, we may as
3346          * rewrite the PTE in the belief that doing so tramples upon less
3347          * state and so involves less work.
3348          */
3349         if (obj->bind_count) {
3350                 /* Before we change the PTE, the GPU must not be accessing it.
3351                  * If we wait upon the object, we know that all the bound
3352                  * VMA are no longer active.
3353                  */
3354                 ret = i915_gem_object_wait_rendering(obj, false);
3355                 if (ret)
3356                         return ret;
3357
3358                 if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
3359                         /* Access to snoopable pages through the GTT is
3360                          * incoherent and on some machines causes a hard
3361                          * lockup. Relinquish the CPU mmaping to force
3362                          * userspace to refault in the pages and we can
3363                          * then double check if the GTT mapping is still
3364                          * valid for that pointer access.
3365                          */
3366                         i915_gem_release_mmap(obj);
3367
3368                         /* As we no longer need a fence for GTT access,
3369                          * we can relinquish it now (and so prevent having
3370                          * to steal a fence from someone else on the next
3371                          * fence request). Note GPU activity would have
3372                          * dropped the fence as all snoopable access is
3373                          * supposed to be linear.
3374                          */
3375                         ret = i915_gem_object_put_fence(obj);
3376                         if (ret)
3377                                 return ret;
3378                 } else {
3379                         /* We either have incoherent backing store and
3380                          * so no GTT access or the architecture is fully
3381                          * coherent. In such cases, existing GTT mmaps
3382                          * ignore the cache bit in the PTE and we can
3383                          * rewrite it without confusing the GPU or having
3384                          * to force userspace to fault back in its mmaps.
3385                          */
3386                 }
3387
3388                 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3389                         if (!drm_mm_node_allocated(&vma->node))
3390                                 continue;
3391
3392                         ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3393                         if (ret)
3394                                 return ret;
3395                 }
3396         }
3397
3398         list_for_each_entry(vma, &obj->vma_list, obj_link)
3399                 vma->node.color = cache_level;
3400         obj->cache_level = cache_level;
3401
3402 out:
3403         /* Flush the dirty CPU caches to the backing storage so that the
3404          * object is now coherent at its new cache level (with respect
3405          * to the access domain).
3406          */
3407         if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
3408                 if (i915_gem_clflush_object(obj, true))
3409                         i915_gem_chipset_flush(to_i915(obj->base.dev));
3410         }
3411
3412         return 0;
3413 }
3414
3415 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3416                                struct drm_file *file)
3417 {
3418         struct drm_i915_gem_caching *args = data;
3419         struct drm_i915_gem_object *obj;
3420
3421         obj = i915_gem_object_lookup(file, args->handle);
3422         if (!obj)
3423                 return -ENOENT;
3424
3425         switch (obj->cache_level) {
3426         case I915_CACHE_LLC:
3427         case I915_CACHE_L3_LLC:
3428                 args->caching = I915_CACHING_CACHED;
3429                 break;
3430
3431         case I915_CACHE_WT:
3432                 args->caching = I915_CACHING_DISPLAY;
3433                 break;
3434
3435         default:
3436                 args->caching = I915_CACHING_NONE;
3437                 break;
3438         }
3439
3440         i915_gem_object_put_unlocked(obj);
3441         return 0;
3442 }
3443
3444 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3445                                struct drm_file *file)
3446 {
3447         struct drm_i915_private *dev_priv = to_i915(dev);
3448         struct drm_i915_gem_caching *args = data;
3449         struct drm_i915_gem_object *obj;
3450         enum i915_cache_level level;
3451         int ret;
3452
3453         switch (args->caching) {
3454         case I915_CACHING_NONE:
3455                 level = I915_CACHE_NONE;
3456                 break;
3457         case I915_CACHING_CACHED:
3458                 /*
3459                  * Due to a HW issue on BXT A stepping, GPU stores via a
3460                  * snooped mapping may leave stale data in a corresponding CPU
3461                  * cacheline, whereas normally such cachelines would get
3462                  * invalidated.
3463                  */
3464                 if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
3465                         return -ENODEV;
3466
3467                 level = I915_CACHE_LLC;
3468                 break;
3469         case I915_CACHING_DISPLAY:
3470                 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3471                 break;
3472         default:
3473                 return -EINVAL;
3474         }
3475
3476         intel_runtime_pm_get(dev_priv);
3477
3478         ret = i915_mutex_lock_interruptible(dev);
3479         if (ret)
3480                 goto rpm_put;
3481
3482         obj = i915_gem_object_lookup(file, args->handle);
3483         if (!obj) {
3484                 ret = -ENOENT;
3485                 goto unlock;
3486         }
3487
3488         ret = i915_gem_object_set_cache_level(obj, level);
3489
3490         i915_gem_object_put(obj);
3491 unlock:
3492         mutex_unlock(&dev->struct_mutex);
3493 rpm_put:
3494         intel_runtime_pm_put(dev_priv);
3495
3496         return ret;
3497 }
3498
3499 /*
3500  * Prepare buffer for display plane (scanout, cursors, etc).
3501  * Can be called from an uninterruptible phase (modesetting) and allows
3502  * any flushes to be pipelined (for pageflips).
3503  */
3504 int
3505 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3506                                      u32 alignment,
3507                                      const struct i915_ggtt_view *view)
3508 {
3509         u32 old_read_domains, old_write_domain;
3510         int ret;
3511
3512         /* Mark the pin_display early so that we account for the
3513          * display coherency whilst setting up the cache domains.
3514          */
3515         obj->pin_display++;
3516
3517         /* The display engine is not coherent with the LLC cache on gen6.  As
3518          * a result, we make sure that the pinning that is about to occur is
3519          * done with uncached PTEs. This is lowest common denominator for all
3520          * chipsets.
3521          *
3522          * However for gen6+, we could do better by using the GFDT bit instead
3523          * of uncaching, which would allow us to flush all the LLC-cached data
3524          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3525          */
3526         ret = i915_gem_object_set_cache_level(obj,
3527                                               HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3528         if (ret)
3529                 goto err_unpin_display;
3530
3531         /* As the user may map the buffer once pinned in the display plane
3532          * (e.g. libkms for the bootup splash), we have to ensure that we
3533          * always use map_and_fenceable for all scanout buffers.
3534          */
3535         ret = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3536                                        view->type == I915_GGTT_VIEW_NORMAL ?
3537                                        PIN_MAPPABLE : 0);
3538         if (ret)
3539                 goto err_unpin_display;
3540
3541         i915_gem_object_flush_cpu_write_domain(obj);
3542
3543         old_write_domain = obj->base.write_domain;
3544         old_read_domains = obj->base.read_domains;
3545
3546         /* It should now be out of any other write domains, and we can update
3547          * the domain values for our changes.
3548          */
3549         obj->base.write_domain = 0;
3550         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3551
3552         trace_i915_gem_object_change_domain(obj,
3553                                             old_read_domains,
3554                                             old_write_domain);
3555
3556         return 0;
3557
3558 err_unpin_display:
3559         obj->pin_display--;
3560         return ret;
3561 }
3562
3563 void
3564 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
3565                                          const struct i915_ggtt_view *view)
3566 {
3567         if (WARN_ON(obj->pin_display == 0))
3568                 return;
3569
3570         i915_gem_object_ggtt_unpin_view(obj, view);
3571
3572         obj->pin_display--;
3573 }
3574
3575 /**
3576  * Moves a single object to the CPU read, and possibly write domain.
3577  * @obj: object to act on
3578  * @write: requesting write or read-only access
3579  *
3580  * This function returns when the move is complete, including waiting on
3581  * flushes to occur.
3582  */
3583 int
3584 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3585 {
3586         uint32_t old_write_domain, old_read_domains;
3587         int ret;
3588
3589         ret = i915_gem_object_wait_rendering(obj, !write);
3590         if (ret)
3591                 return ret;
3592
3593         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3594                 return 0;
3595
3596         i915_gem_object_flush_gtt_write_domain(obj);
3597
3598         old_write_domain = obj->base.write_domain;
3599         old_read_domains = obj->base.read_domains;
3600
3601         /* Flush the CPU cache if it's still invalid. */
3602         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3603                 i915_gem_clflush_object(obj, false);
3604
3605                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3606         }
3607
3608         /* It should now be out of any other write domains, and we can update
3609          * the domain values for our changes.
3610          */
3611         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3612
3613         /* If we're writing through the CPU, then the GPU read domains will
3614          * need to be invalidated at next use.
3615          */
3616         if (write) {
3617                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3618                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3619         }
3620
3621         trace_i915_gem_object_change_domain(obj,
3622                                             old_read_domains,
3623                                             old_write_domain);
3624
3625         return 0;
3626 }
3627
3628 /* Throttle our rendering by waiting until the ring has completed our requests
3629  * emitted over 20 msec ago.
3630  *
3631  * Note that if we were to use the current jiffies each time around the loop,
3632  * we wouldn't escape the function with any frames outstanding if the time to
3633  * render a frame was over 20ms.
3634  *
3635  * This should get us reasonable parallelism between CPU and GPU but also
3636  * relatively low latency when blocking on a particular request to finish.
3637  */
3638 static int
3639 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3640 {
3641         struct drm_i915_private *dev_priv = to_i915(dev);
3642         struct drm_i915_file_private *file_priv = file->driver_priv;
3643         unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
3644         struct drm_i915_gem_request *request, *target = NULL;
3645         int ret;
3646
3647         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3648         if (ret)
3649                 return ret;
3650
3651         /* ABI: return -EIO if already wedged */
3652         if (i915_terminally_wedged(&dev_priv->gpu_error))
3653                 return -EIO;
3654
3655         spin_lock(&file_priv->mm.lock);
3656         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3657                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3658                         break;
3659
3660                 /*
3661                  * Note that the request might not have been submitted yet.
3662                  * In which case emitted_jiffies will be zero.
3663                  */
3664                 if (!request->emitted_jiffies)
3665                         continue;
3666
3667                 target = request;
3668         }
3669         if (target)
3670                 i915_gem_request_get(target);
3671         spin_unlock(&file_priv->mm.lock);
3672
3673         if (target == NULL)
3674                 return 0;
3675
3676         ret = i915_wait_request(target, true, NULL, NULL);
3677         i915_gem_request_put(target);
3678
3679         return ret;
3680 }
3681
3682 static bool
3683 i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
3684 {
3685         struct drm_i915_gem_object *obj = vma->obj;
3686
3687         if (vma->node.size < size)
3688                 return true;
3689
3690         if (alignment && vma->node.start & (alignment - 1))
3691                 return true;
3692
3693         if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
3694                 return true;
3695
3696         if (flags & PIN_OFFSET_BIAS &&
3697             vma->node.start < (flags & PIN_OFFSET_MASK))
3698                 return true;
3699
3700         if (flags & PIN_OFFSET_FIXED &&
3701             vma->node.start != (flags & PIN_OFFSET_MASK))
3702                 return true;
3703
3704         return false;
3705 }
3706
3707 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
3708 {
3709         struct drm_i915_gem_object *obj = vma->obj;
3710         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3711         bool mappable, fenceable;
3712         u32 fence_size, fence_alignment;
3713
3714         fence_size = i915_gem_get_ggtt_size(dev_priv,
3715                                             obj->base.size,
3716                                             obj->tiling_mode);
3717         fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
3718                                                       obj->base.size,
3719                                                       obj->tiling_mode,
3720                                                       true);
3721
3722         fenceable = (vma->node.size == fence_size &&
3723                      (vma->node.start & (fence_alignment - 1)) == 0);
3724
3725         mappable = (vma->node.start + fence_size <=
3726                     dev_priv->ggtt.mappable_end);
3727
3728         obj->map_and_fenceable = mappable && fenceable;
3729 }
3730
3731 static int
3732 i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
3733                        struct i915_address_space *vm,
3734                        const struct i915_ggtt_view *ggtt_view,
3735                        u64 size,
3736                        u64 alignment,
3737                        u64 flags)
3738 {
3739         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3740         struct i915_vma *vma;
3741         unsigned bound;
3742         int ret;
3743
3744         if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
3745                 return -ENODEV;
3746
3747         if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
3748                 return -EINVAL;
3749
3750         if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
3751                 return -EINVAL;
3752
3753         if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
3754                 return -EINVAL;
3755
3756         vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
3757                           i915_gem_obj_to_vma(obj, vm);
3758
3759         if (vma) {
3760                 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3761                         return -EBUSY;
3762
3763                 if (i915_vma_misplaced(vma, size, alignment, flags)) {
3764                         WARN(vma->pin_count,
3765                              "bo is already pinned in %s with incorrect alignment:"
3766                              " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d,"
3767                              " obj->map_and_fenceable=%d\n",
3768                              ggtt_view ? "ggtt" : "ppgtt",
3769                              upper_32_bits(vma->node.start),
3770                              lower_32_bits(vma->node.start),
3771                              alignment,
3772                              !!(flags & PIN_MAPPABLE),
3773                              obj->map_and_fenceable);
3774                         ret = i915_vma_unbind(vma);
3775                         if (ret)
3776                                 return ret;
3777
3778                         vma = NULL;
3779                 }
3780         }
3781
3782         if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
3783                 vma = i915_gem_object_insert_into_vm(obj, vm, ggtt_view,
3784                                                      size, alignment, flags);
3785                 if (IS_ERR(vma))
3786                         return PTR_ERR(vma);
3787         }
3788
3789         bound = vma->bound;
3790         ret = i915_vma_bind(vma, obj->cache_level, flags);
3791         if (ret)
3792                 return ret;
3793
3794         if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
3795             (bound ^ vma->bound) & GLOBAL_BIND) {
3796                 __i915_vma_set_map_and_fenceable(vma);
3797                 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
3798         }
3799
3800         GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
3801
3802         vma->pin_count++;
3803         return 0;
3804 }
3805
3806 int
3807 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3808                     struct i915_address_space *vm,
3809                     u64 size,
3810                     u64 alignment,
3811                     u64 flags)
3812 {
3813         return i915_gem_object_do_pin(obj, vm,
3814                                       i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
3815                                       size, alignment, flags);
3816 }
3817
3818 int
3819 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3820                          const struct i915_ggtt_view *view,
3821                          u64 size,
3822                          u64 alignment,
3823                          u64 flags)
3824 {
3825         struct drm_device *dev = obj->base.dev;
3826         struct drm_i915_private *dev_priv = to_i915(dev);
3827         struct i915_ggtt *ggtt = &dev_priv->ggtt;
3828
3829         BUG_ON(!view);
3830
3831         return i915_gem_object_do_pin(obj, &ggtt->base, view,
3832                                       size, alignment, flags | PIN_GLOBAL);
3833 }
3834
3835 void
3836 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
3837                                 const struct i915_ggtt_view *view)
3838 {
3839         struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
3840
3841         WARN_ON(vma->pin_count == 0);
3842         WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
3843
3844         --vma->pin_count;
3845 }
3846
3847 int
3848 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3849                     struct drm_file *file)
3850 {
3851         struct drm_i915_gem_busy *args = data;
3852         struct drm_i915_gem_object *obj;
3853         int ret;
3854
3855         ret = i915_mutex_lock_interruptible(dev);
3856         if (ret)
3857                 return ret;
3858
3859         obj = i915_gem_object_lookup(file, args->handle);
3860         if (!obj) {
3861                 ret = -ENOENT;
3862                 goto unlock;
3863         }
3864
3865         /* Count all active objects as busy, even if they are currently not used
3866          * by the gpu. Users of this interface expect objects to eventually
3867          * become non-busy without any further actions.
3868          */
3869         args->busy = 0;
3870         if (obj->active) {
3871                 struct drm_i915_gem_request *req;
3872                 int i;
3873
3874                 for (i = 0; i < I915_NUM_ENGINES; i++) {
3875                         req = i915_gem_active_peek(&obj->last_read[i],
3876                                                    &obj->base.dev->struct_mutex);
3877                         if (req)
3878                                 args->busy |= 1 << (16 + req->engine->exec_id);
3879                 }
3880                 req = i915_gem_active_peek(&obj->last_write,
3881                                            &obj->base.dev->struct_mutex);
3882                 if (req)
3883                         args->busy |= req->engine->exec_id;
3884         }
3885
3886         i915_gem_object_put(obj);
3887 unlock:
3888         mutex_unlock(&dev->struct_mutex);
3889         return ret;
3890 }
3891
3892 int
3893 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3894                         struct drm_file *file_priv)
3895 {
3896         return i915_gem_ring_throttle(dev, file_priv);
3897 }
3898
3899 int
3900 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3901                        struct drm_file *file_priv)
3902 {
3903         struct drm_i915_private *dev_priv = to_i915(dev);
3904         struct drm_i915_gem_madvise *args = data;
3905         struct drm_i915_gem_object *obj;
3906         int ret;
3907
3908         switch (args->madv) {
3909         case I915_MADV_DONTNEED:
3910         case I915_MADV_WILLNEED:
3911             break;
3912         default:
3913             return -EINVAL;
3914         }
3915
3916         ret = i915_mutex_lock_interruptible(dev);
3917         if (ret)
3918                 return ret;
3919
3920         obj = i915_gem_object_lookup(file_priv, args->handle);
3921         if (!obj) {
3922                 ret = -ENOENT;
3923                 goto unlock;
3924         }
3925
3926         if (i915_gem_obj_is_pinned(obj)) {
3927                 ret = -EINVAL;
3928                 goto out;
3929         }
3930
3931         if (obj->pages &&
3932             obj->tiling_mode != I915_TILING_NONE &&
3933             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
3934                 if (obj->madv == I915_MADV_WILLNEED)
3935                         i915_gem_object_unpin_pages(obj);
3936                 if (args->madv == I915_MADV_WILLNEED)
3937                         i915_gem_object_pin_pages(obj);
3938         }
3939
3940         if (obj->madv != __I915_MADV_PURGED)
3941                 obj->madv = args->madv;
3942
3943         /* if the object is no longer attached, discard its backing storage */
3944         if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
3945                 i915_gem_object_truncate(obj);
3946
3947         args->retained = obj->madv != __I915_MADV_PURGED;
3948
3949 out:
3950         i915_gem_object_put(obj);
3951 unlock:
3952         mutex_unlock(&dev->struct_mutex);
3953         return ret;
3954 }
3955
3956 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3957                           const struct drm_i915_gem_object_ops *ops)
3958 {
3959         int i;
3960
3961         INIT_LIST_HEAD(&obj->global_list);
3962         for (i = 0; i < I915_NUM_ENGINES; i++)
3963                 init_request_active(&obj->last_read[i],
3964                                     i915_gem_object_retire__read);
3965         init_request_active(&obj->last_write,
3966                             i915_gem_object_retire__write);
3967         init_request_active(&obj->last_fence, NULL);
3968         INIT_LIST_HEAD(&obj->obj_exec_link);
3969         INIT_LIST_HEAD(&obj->vma_list);
3970         INIT_LIST_HEAD(&obj->batch_pool_link);
3971
3972         obj->ops = ops;
3973
3974         obj->fence_reg = I915_FENCE_REG_NONE;
3975         obj->madv = I915_MADV_WILLNEED;
3976
3977         i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
3978 }
3979
3980 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3981         .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
3982         .get_pages = i915_gem_object_get_pages_gtt,
3983         .put_pages = i915_gem_object_put_pages_gtt,
3984 };
3985
3986 struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
3987                                                   size_t size)
3988 {
3989         struct drm_i915_gem_object *obj;
3990         struct address_space *mapping;
3991         gfp_t mask;
3992         int ret;
3993
3994         obj = i915_gem_object_alloc(dev);
3995         if (obj == NULL)
3996                 return ERR_PTR(-ENOMEM);
3997
3998         ret = drm_gem_object_init(dev, &obj->base, size);
3999         if (ret)
4000                 goto fail;
4001
4002         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4003         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4004                 /* 965gm cannot relocate objects above 4GiB. */
4005                 mask &= ~__GFP_HIGHMEM;
4006                 mask |= __GFP_DMA32;
4007         }
4008
4009         mapping = file_inode(obj->base.filp)->i_mapping;
4010         mapping_set_gfp_mask(mapping, mask);
4011
4012         i915_gem_object_init(obj, &i915_gem_object_ops);
4013
4014         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4015         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4016
4017         if (HAS_LLC(dev)) {
4018                 /* On some devices, we can have the GPU use the LLC (the CPU
4019                  * cache) for about a 10% performance improvement
4020                  * compared to uncached.  Graphics requests other than
4021                  * display scanout are coherent with the CPU in
4022                  * accessing this cache.  This means in this mode we
4023                  * don't need to clflush on the CPU side, and on the
4024                  * GPU side we only need to flush internal caches to
4025                  * get data visible to the CPU.
4026                  *
4027                  * However, we maintain the display planes as UC, and so
4028                  * need to rebind when first used as such.
4029                  */
4030                 obj->cache_level = I915_CACHE_LLC;
4031         } else
4032                 obj->cache_level = I915_CACHE_NONE;
4033
4034         trace_i915_gem_object_create(obj);
4035
4036         return obj;
4037
4038 fail:
4039         i915_gem_object_free(obj);
4040
4041         return ERR_PTR(ret);
4042 }
4043
4044 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4045 {
4046         /* If we are the last user of the backing storage (be it shmemfs
4047          * pages or stolen etc), we know that the pages are going to be
4048          * immediately released. In this case, we can then skip copying
4049          * back the contents from the GPU.
4050          */
4051
4052         if (obj->madv != I915_MADV_WILLNEED)
4053                 return false;
4054
4055         if (obj->base.filp == NULL)
4056                 return true;
4057
4058         /* At first glance, this looks racy, but then again so would be
4059          * userspace racing mmap against close. However, the first external
4060          * reference to the filp can only be obtained through the
4061          * i915_gem_mmap_ioctl() which safeguards us against the user
4062          * acquiring such a reference whilst we are in the middle of
4063          * freeing the object.
4064          */
4065         return atomic_long_read(&obj->base.filp->f_count) == 1;
4066 }
4067
4068 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4069 {
4070         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4071         struct drm_device *dev = obj->base.dev;
4072         struct drm_i915_private *dev_priv = to_i915(dev);
4073         struct i915_vma *vma, *next;
4074
4075         intel_runtime_pm_get(dev_priv);
4076
4077         trace_i915_gem_object_destroy(obj);
4078
4079         /* All file-owned VMA should have been released by this point through
4080          * i915_gem_close_object(), or earlier by i915_gem_context_close().
4081          * However, the object may also be bound into the global GTT (e.g.
4082          * older GPUs without per-process support, or for direct access through
4083          * the GTT either for the user or for scanout). Those VMA still need to
4084          * unbound now.
4085          */
4086         list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
4087                 GEM_BUG_ON(!vma->is_ggtt);
4088                 GEM_BUG_ON(i915_vma_is_active(vma));
4089                 vma->pin_count = 0;
4090                 i915_vma_close(vma);
4091         }
4092         GEM_BUG_ON(obj->bind_count);
4093
4094         /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4095          * before progressing. */
4096         if (obj->stolen)
4097                 i915_gem_object_unpin_pages(obj);
4098
4099         WARN_ON(obj->frontbuffer_bits);
4100
4101         if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4102             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4103             obj->tiling_mode != I915_TILING_NONE)
4104                 i915_gem_object_unpin_pages(obj);
4105
4106         if (WARN_ON(obj->pages_pin_count))
4107                 obj->pages_pin_count = 0;
4108         if (discard_backing_storage(obj))
4109                 obj->madv = I915_MADV_DONTNEED;
4110         i915_gem_object_put_pages(obj);
4111
4112         BUG_ON(obj->pages);
4113
4114         if (obj->base.import_attach)
4115                 drm_prime_gem_destroy(&obj->base, NULL);
4116
4117         if (obj->ops->release)
4118                 obj->ops->release(obj);
4119
4120         drm_gem_object_release(&obj->base);
4121         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4122
4123         kfree(obj->bit_17);
4124         i915_gem_object_free(obj);
4125
4126         intel_runtime_pm_put(dev_priv);
4127 }
4128
4129 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4130                                      struct i915_address_space *vm)
4131 {
4132         struct i915_vma *vma;
4133         list_for_each_entry(vma, &obj->vma_list, obj_link) {
4134                 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
4135                     vma->vm == vm)
4136                         return vma;
4137         }
4138         return NULL;
4139 }
4140
4141 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
4142                                            const struct i915_ggtt_view *view)
4143 {
4144         struct i915_vma *vma;
4145
4146         GEM_BUG_ON(!view);
4147
4148         list_for_each_entry(vma, &obj->vma_list, obj_link)
4149                 if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
4150                         return vma;
4151         return NULL;
4152 }
4153
4154 static void
4155 i915_gem_stop_engines(struct drm_device *dev)
4156 {
4157         struct drm_i915_private *dev_priv = to_i915(dev);
4158         struct intel_engine_cs *engine;
4159
4160         for_each_engine(engine, dev_priv)
4161                 dev_priv->gt.stop_engine(engine);
4162 }
4163
4164 int
4165 i915_gem_suspend(struct drm_device *dev)
4166 {
4167         struct drm_i915_private *dev_priv = to_i915(dev);
4168         int ret = 0;
4169
4170         intel_suspend_gt_powersave(dev_priv);
4171
4172         mutex_lock(&dev->struct_mutex);
4173
4174         /* We have to flush all the executing contexts to main memory so
4175          * that they can saved in the hibernation image. To ensure the last
4176          * context image is coherent, we have to switch away from it. That
4177          * leaves the dev_priv->kernel_context still active when
4178          * we actually suspend, and its image in memory may not match the GPU
4179          * state. Fortunately, the kernel_context is disposable and we do
4180          * not rely on its state.
4181          */
4182         ret = i915_gem_switch_to_kernel_context(dev_priv);
4183         if (ret)
4184                 goto err;
4185
4186         ret = i915_gem_wait_for_idle(dev_priv);
4187         if (ret)
4188                 goto err;
4189
4190         i915_gem_retire_requests(dev_priv);
4191
4192         /* Note that rather than stopping the engines, all we have to do
4193          * is assert that every RING_HEAD == RING_TAIL (all execution complete)
4194          * and similar for all logical context images (to ensure they are
4195          * all ready for hibernation).
4196          */
4197         i915_gem_stop_engines(dev);
4198         i915_gem_context_lost(dev_priv);
4199         mutex_unlock(&dev->struct_mutex);
4200
4201         cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4202         cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4203         flush_delayed_work(&dev_priv->gt.idle_work);
4204
4205         /* Assert that we sucessfully flushed all the work and
4206          * reset the GPU back to its idle, low power state.
4207          */
4208         WARN_ON(dev_priv->gt.awake);
4209
4210         return 0;
4211
4212 err:
4213         mutex_unlock(&dev->struct_mutex);
4214         return ret;
4215 }
4216
4217 void i915_gem_resume(struct drm_device *dev)
4218 {
4219         struct drm_i915_private *dev_priv = to_i915(dev);
4220
4221         mutex_lock(&dev->struct_mutex);
4222         i915_gem_restore_gtt_mappings(dev);
4223
4224         /* As we didn't flush the kernel context before suspend, we cannot
4225          * guarantee that the context image is complete. So let's just reset
4226          * it and start again.
4227          */
4228         if (i915.enable_execlists)
4229                 intel_lr_context_reset(dev_priv, dev_priv->kernel_context);
4230
4231         mutex_unlock(&dev->struct_mutex);
4232 }
4233
4234 void i915_gem_init_swizzling(struct drm_device *dev)
4235 {
4236         struct drm_i915_private *dev_priv = to_i915(dev);
4237
4238         if (INTEL_INFO(dev)->gen < 5 ||
4239             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4240                 return;
4241
4242         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4243                                  DISP_TILE_SURFACE_SWIZZLING);
4244
4245         if (IS_GEN5(dev))
4246                 return;
4247
4248         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4249         if (IS_GEN6(dev))
4250                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4251         else if (IS_GEN7(dev))
4252                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4253         else if (IS_GEN8(dev))
4254                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4255         else
4256                 BUG();
4257 }
4258
4259 static void init_unused_ring(struct drm_device *dev, u32 base)
4260 {
4261         struct drm_i915_private *dev_priv = to_i915(dev);
4262
4263         I915_WRITE(RING_CTL(base), 0);
4264         I915_WRITE(RING_HEAD(base), 0);
4265         I915_WRITE(RING_TAIL(base), 0);
4266         I915_WRITE(RING_START(base), 0);
4267 }
4268
4269 static void init_unused_rings(struct drm_device *dev)
4270 {
4271         if (IS_I830(dev)) {
4272                 init_unused_ring(dev, PRB1_BASE);
4273                 init_unused_ring(dev, SRB0_BASE);
4274                 init_unused_ring(dev, SRB1_BASE);
4275                 init_unused_ring(dev, SRB2_BASE);
4276                 init_unused_ring(dev, SRB3_BASE);
4277         } else if (IS_GEN2(dev)) {
4278                 init_unused_ring(dev, SRB0_BASE);
4279                 init_unused_ring(dev, SRB1_BASE);
4280         } else if (IS_GEN3(dev)) {
4281                 init_unused_ring(dev, PRB1_BASE);
4282                 init_unused_ring(dev, PRB2_BASE);
4283         }
4284 }
4285
4286 int
4287 i915_gem_init_hw(struct drm_device *dev)
4288 {
4289         struct drm_i915_private *dev_priv = to_i915(dev);
4290         struct intel_engine_cs *engine;
4291         int ret;
4292
4293         /* Double layer security blanket, see i915_gem_init() */
4294         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4295
4296         if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
4297                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4298
4299         if (IS_HASWELL(dev))
4300                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4301                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4302
4303         if (HAS_PCH_NOP(dev)) {
4304                 if (IS_IVYBRIDGE(dev)) {
4305                         u32 temp = I915_READ(GEN7_MSG_CTL);
4306                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4307                         I915_WRITE(GEN7_MSG_CTL, temp);
4308                 } else if (INTEL_INFO(dev)->gen >= 7) {
4309                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4310                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4311                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4312                 }
4313         }
4314
4315         i915_gem_init_swizzling(dev);
4316
4317         /*
4318          * At least 830 can leave some of the unused rings
4319          * "active" (ie. head != tail) after resume which
4320          * will prevent c3 entry. Makes sure all unused rings
4321          * are totally idle.
4322          */
4323         init_unused_rings(dev);
4324
4325         BUG_ON(!dev_priv->kernel_context);
4326
4327         ret = i915_ppgtt_init_hw(dev);
4328         if (ret) {
4329                 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4330                 goto out;
4331         }
4332
4333         /* Need to do basic initialisation of all rings first: */
4334         for_each_engine(engine, dev_priv) {
4335                 ret = engine->init_hw(engine);
4336                 if (ret)
4337                         goto out;
4338         }
4339
4340         intel_mocs_init_l3cc_table(dev);
4341
4342         /* We can't enable contexts until all firmware is loaded */
4343         ret = intel_guc_setup(dev);
4344         if (ret)
4345                 goto out;
4346
4347 out:
4348         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4349         return ret;
4350 }
4351
4352 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4353 {
4354         if (INTEL_INFO(dev_priv)->gen < 6)
4355                 return false;
4356
4357         /* TODO: make semaphores and Execlists play nicely together */
4358         if (i915.enable_execlists)
4359                 return false;
4360
4361         if (value >= 0)
4362                 return value;
4363
4364 #ifdef CONFIG_INTEL_IOMMU
4365         /* Enable semaphores on SNB when IO remapping is off */
4366         if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
4367                 return false;
4368 #endif
4369
4370         return true;
4371 }
4372
4373 int i915_gem_init(struct drm_device *dev)
4374 {
4375         struct drm_i915_private *dev_priv = to_i915(dev);
4376         int ret;
4377
4378         mutex_lock(&dev->struct_mutex);
4379
4380         if (!i915.enable_execlists) {
4381                 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
4382                 dev_priv->gt.stop_engine = intel_engine_stop;
4383         } else {
4384                 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4385                 dev_priv->gt.stop_engine = intel_logical_ring_stop;
4386         }
4387
4388         /* This is just a security blanket to placate dragons.
4389          * On some systems, we very sporadically observe that the first TLBs
4390          * used by the CS may be stale, despite us poking the TLB reset. If
4391          * we hold the forcewake during initialisation these problems
4392          * just magically go away.
4393          */
4394         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4395
4396         i915_gem_init_userptr(dev_priv);
4397
4398         ret = i915_gem_init_ggtt(dev_priv);
4399         if (ret)
4400                 goto out_unlock;
4401
4402         ret = i915_gem_context_init(dev);
4403         if (ret)
4404                 goto out_unlock;
4405
4406         ret = intel_engines_init(dev);
4407         if (ret)
4408                 goto out_unlock;
4409
4410         ret = i915_gem_init_hw(dev);
4411         if (ret == -EIO) {
4412                 /* Allow engine initialisation to fail by marking the GPU as
4413                  * wedged. But we only want to do this where the GPU is angry,
4414                  * for all other failure, such as an allocation failure, bail.
4415                  */
4416                 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4417                 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4418                 ret = 0;
4419         }
4420
4421 out_unlock:
4422         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4423         mutex_unlock(&dev->struct_mutex);
4424
4425         return ret;
4426 }
4427
4428 void
4429 i915_gem_cleanup_engines(struct drm_device *dev)
4430 {
4431         struct drm_i915_private *dev_priv = to_i915(dev);
4432         struct intel_engine_cs *engine;
4433
4434         for_each_engine(engine, dev_priv)
4435                 dev_priv->gt.cleanup_engine(engine);
4436 }
4437
4438 static void
4439 init_engine_lists(struct intel_engine_cs *engine)
4440 {
4441         INIT_LIST_HEAD(&engine->request_list);
4442 }
4443
4444 void
4445 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
4446 {
4447         struct drm_device *dev = &dev_priv->drm;
4448
4449         if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
4450             !IS_CHERRYVIEW(dev_priv))
4451                 dev_priv->num_fence_regs = 32;
4452         else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
4453                  IS_I945GM(dev_priv) || IS_G33(dev_priv))
4454                 dev_priv->num_fence_regs = 16;
4455         else
4456                 dev_priv->num_fence_regs = 8;
4457
4458         if (intel_vgpu_active(dev_priv))
4459                 dev_priv->num_fence_regs =
4460                                 I915_READ(vgtif_reg(avail_rs.fence_num));
4461
4462         /* Initialize fence registers to zero */
4463         i915_gem_restore_fences(dev);
4464
4465         i915_gem_detect_bit_6_swizzle(dev);
4466 }
4467
4468 void
4469 i915_gem_load_init(struct drm_device *dev)
4470 {
4471         struct drm_i915_private *dev_priv = to_i915(dev);
4472         int i;
4473
4474         dev_priv->objects =
4475                 kmem_cache_create("i915_gem_object",
4476                                   sizeof(struct drm_i915_gem_object), 0,
4477                                   SLAB_HWCACHE_ALIGN,
4478                                   NULL);
4479         dev_priv->vmas =
4480                 kmem_cache_create("i915_gem_vma",
4481                                   sizeof(struct i915_vma), 0,
4482                                   SLAB_HWCACHE_ALIGN,
4483                                   NULL);
4484         dev_priv->requests =
4485                 kmem_cache_create("i915_gem_request",
4486                                   sizeof(struct drm_i915_gem_request), 0,
4487                                   SLAB_HWCACHE_ALIGN,
4488                                   NULL);
4489
4490         INIT_LIST_HEAD(&dev_priv->context_list);
4491         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4492         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4493         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4494         for (i = 0; i < I915_NUM_ENGINES; i++)
4495                 init_engine_lists(&dev_priv->engine[i]);
4496         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4497                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4498         INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
4499                           i915_gem_retire_work_handler);
4500         INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
4501                           i915_gem_idle_work_handler);
4502         init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
4503         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4504
4505         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4506
4507         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4508
4509         init_waitqueue_head(&dev_priv->pending_flip_queue);
4510
4511         dev_priv->mm.interruptible = true;
4512
4513         mutex_init(&dev_priv->fb_tracking.lock);
4514 }
4515
4516 void i915_gem_load_cleanup(struct drm_device *dev)
4517 {
4518         struct drm_i915_private *dev_priv = to_i915(dev);
4519
4520         kmem_cache_destroy(dev_priv->requests);
4521         kmem_cache_destroy(dev_priv->vmas);
4522         kmem_cache_destroy(dev_priv->objects);
4523 }
4524
4525 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
4526 {
4527         struct drm_i915_gem_object *obj;
4528
4529         /* Called just before we write the hibernation image.
4530          *
4531          * We need to update the domain tracking to reflect that the CPU
4532          * will be accessing all the pages to create and restore from the
4533          * hibernation, and so upon restoration those pages will be in the
4534          * CPU domain.
4535          *
4536          * To make sure the hibernation image contains the latest state,
4537          * we update that state just before writing out the image.
4538          */
4539
4540         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
4541                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4542                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4543         }
4544
4545         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4546                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4547                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4548         }
4549
4550         return 0;
4551 }
4552
4553 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4554 {
4555         struct drm_i915_file_private *file_priv = file->driver_priv;
4556         struct drm_i915_gem_request *request;
4557
4558         /* Clean up our request list when the client is going away, so that
4559          * later retire_requests won't dereference our soon-to-be-gone
4560          * file_priv.
4561          */
4562         spin_lock(&file_priv->mm.lock);
4563         list_for_each_entry(request, &file_priv->mm.request_list, client_list)
4564                 request->file_priv = NULL;
4565         spin_unlock(&file_priv->mm.lock);
4566
4567         if (!list_empty(&file_priv->rps.link)) {
4568                 spin_lock(&to_i915(dev)->rps.client_lock);
4569                 list_del(&file_priv->rps.link);
4570                 spin_unlock(&to_i915(dev)->rps.client_lock);
4571         }
4572 }
4573
4574 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4575 {
4576         struct drm_i915_file_private *file_priv;
4577         int ret;
4578
4579         DRM_DEBUG_DRIVER("\n");
4580
4581         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4582         if (!file_priv)
4583                 return -ENOMEM;
4584
4585         file->driver_priv = file_priv;
4586         file_priv->dev_priv = to_i915(dev);
4587         file_priv->file = file;
4588         INIT_LIST_HEAD(&file_priv->rps.link);
4589
4590         spin_lock_init(&file_priv->mm.lock);
4591         INIT_LIST_HEAD(&file_priv->mm.request_list);
4592
4593         file_priv->bsd_engine = -1;
4594
4595         ret = i915_gem_context_open(dev, file);
4596         if (ret)
4597                 kfree(file_priv);
4598
4599         return ret;
4600 }
4601
4602 /**
4603  * i915_gem_track_fb - update frontbuffer tracking
4604  * @old: current GEM buffer for the frontbuffer slots
4605  * @new: new GEM buffer for the frontbuffer slots
4606  * @frontbuffer_bits: bitmask of frontbuffer slots
4607  *
4608  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
4609  * from @old and setting them in @new. Both @old and @new can be NULL.
4610  */
4611 void i915_gem_track_fb(struct drm_i915_gem_object *old,
4612                        struct drm_i915_gem_object *new,
4613                        unsigned frontbuffer_bits)
4614 {
4615         if (old) {
4616                 WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
4617                 WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
4618                 old->frontbuffer_bits &= ~frontbuffer_bits;
4619         }
4620
4621         if (new) {
4622                 WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
4623                 WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
4624                 new->frontbuffer_bits |= frontbuffer_bits;
4625         }
4626 }
4627
4628 /* All the new VM stuff */
4629 u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
4630                         struct i915_address_space *vm)
4631 {
4632         struct drm_i915_private *dev_priv = to_i915(o->base.dev);
4633         struct i915_vma *vma;
4634
4635         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
4636
4637         list_for_each_entry(vma, &o->vma_list, obj_link) {
4638                 if (vma->is_ggtt &&
4639                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
4640                         continue;
4641                 if (vma->vm == vm)
4642                         return vma->node.start;
4643         }
4644
4645         WARN(1, "%s vma for this object not found.\n",
4646              i915_is_ggtt(vm) ? "global" : "ppgtt");
4647         return -1;
4648 }
4649
4650 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
4651                                   const struct i915_ggtt_view *view)
4652 {
4653         struct i915_vma *vma;
4654
4655         list_for_each_entry(vma, &o->vma_list, obj_link)
4656                 if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
4657                         return vma->node.start;
4658
4659         WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
4660         return -1;
4661 }
4662
4663 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4664                         struct i915_address_space *vm)
4665 {
4666         struct i915_vma *vma;
4667
4668         list_for_each_entry(vma, &o->vma_list, obj_link) {
4669                 if (vma->is_ggtt &&
4670                     vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
4671                         continue;
4672                 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
4673                         return true;
4674         }
4675
4676         return false;
4677 }
4678
4679 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
4680                                   const struct i915_ggtt_view *view)
4681 {
4682         struct i915_vma *vma;
4683
4684         list_for_each_entry(vma, &o->vma_list, obj_link)
4685                 if (vma->is_ggtt &&
4686                     i915_ggtt_view_equal(&vma->ggtt_view, view) &&
4687                     drm_mm_node_allocated(&vma->node))
4688                         return true;
4689
4690         return false;
4691 }
4692
4693 unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
4694 {
4695         struct i915_vma *vma;
4696
4697         GEM_BUG_ON(list_empty(&o->vma_list));
4698
4699         list_for_each_entry(vma, &o->vma_list, obj_link) {
4700                 if (vma->is_ggtt &&
4701                     vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
4702                         return vma->node.size;
4703         }
4704
4705         return 0;
4706 }
4707
4708 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
4709 {
4710         struct i915_vma *vma;
4711         list_for_each_entry(vma, &obj->vma_list, obj_link)
4712                 if (vma->pin_count > 0)
4713                         return true;
4714
4715         return false;
4716 }
4717
4718 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
4719 struct page *
4720 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
4721 {
4722         struct page *page;
4723
4724         /* Only default objects have per-page dirty tracking */
4725         if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
4726                 return NULL;
4727
4728         page = i915_gem_object_get_page(obj, n);
4729         set_page_dirty(page);
4730         return page;
4731 }
4732
4733 /* Allocate a new GEM object and fill it with the supplied data */
4734 struct drm_i915_gem_object *
4735 i915_gem_object_create_from_data(struct drm_device *dev,
4736                                  const void *data, size_t size)
4737 {
4738         struct drm_i915_gem_object *obj;
4739         struct sg_table *sg;
4740         size_t bytes;
4741         int ret;
4742
4743         obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
4744         if (IS_ERR(obj))
4745                 return obj;
4746
4747         ret = i915_gem_object_set_to_cpu_domain(obj, true);
4748         if (ret)
4749                 goto fail;
4750
4751         ret = i915_gem_object_get_pages(obj);
4752         if (ret)
4753                 goto fail;
4754
4755         i915_gem_object_pin_pages(obj);
4756         sg = obj->pages;
4757         bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
4758         obj->dirty = 1;         /* Backing store is now out of date */
4759         i915_gem_object_unpin_pages(obj);
4760
4761         if (WARN_ON(bytes != size)) {
4762                 DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
4763                 ret = -EFAULT;
4764                 goto fail;
4765         }
4766
4767         return obj;
4768
4769 fail:
4770         i915_gem_object_put(obj);
4771         return ERR_PTR(ret);
4772 }