drivers/gpu/drm/i915/i915_gem.c
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_vgpu.h"
33 #include "i915_trace.h"
34 #include "intel_drv.h"
35 #include "intel_frontbuffer.h"
36 #include "intel_mocs.h"
37 #include <linux/dma-fence-array.h>
38 #include <linux/reservation.h>
39 #include <linux/shmem_fs.h>
40 #include <linux/slab.h>
41 #include <linux/stop_machine.h>
42 #include <linux/swap.h>
43 #include <linux/pci.h>
44 #include <linux/dma-buf.h>
45
46 static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
47 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
48 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
49
50 static bool cpu_cache_is_coherent(struct drm_device *dev,
51                                   enum i915_cache_level level)
52 {
53         return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE;
54 }
55
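/* A CPU write needs a clflush when the object is not already in the CPU
 * write domain and either the CPU cache is not coherent with the GPU for
 * this cache level or the object is pinned for display (scanout bypasses
 * the CPU cache).
 */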
56 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
57 {
58         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
59                 return false;
60
61         if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
62                 return true;
63
64         return obj->pin_display;
65 }
66
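/* Reserve a temporary node in the CPU-mappable portion of the GGTT so that
 * individual pages can be windowed through the aperture (used by the GTT
 * pread/pwrite paths below when the object itself cannot be pinned in the
 * mappable region).
 */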
67 static int
68 insert_mappable_node(struct i915_ggtt *ggtt,
69                      struct drm_mm_node *node, u32 size)
70 {
71         memset(node, 0, sizeof(*node));
72         return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
73                                                    size, 0,
74                                                    I915_COLOR_UNEVICTABLE,
75                                                    0, ggtt->mappable_end,
76                                                    DRM_MM_SEARCH_DEFAULT,
77                                                    DRM_MM_CREATE_DEFAULT);
78 }
79
80 static void
81 remove_mappable_node(struct drm_mm_node *node)
82 {
83         drm_mm_remove_node(node);
84 }
85
86 /* some bookkeeping */
87 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
88                                   u64 size)
89 {
90         spin_lock(&dev_priv->mm.object_stat_lock);
91         dev_priv->mm.object_count++;
92         dev_priv->mm.object_memory += size;
93         spin_unlock(&dev_priv->mm.object_stat_lock);
94 }
95
96 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
97                                      u64 size)
98 {
99         spin_lock(&dev_priv->mm.object_stat_lock);
100         dev_priv->mm.object_count--;
101         dev_priv->mm.object_memory -= size;
102         spin_unlock(&dev_priv->mm.object_stat_lock);
103 }
104
105 static int
106 i915_gem_wait_for_error(struct i915_gpu_error *error)
107 {
108         int ret;
109
110         might_sleep();
111
112         if (!i915_reset_in_progress(error))
113                 return 0;
114
115         /*
116          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
117          * userspace. If it takes that long something really bad is going on and
118          * we should simply try to bail out and fail as gracefully as possible.
119          */
120         ret = wait_event_interruptible_timeout(error->reset_queue,
121                                                !i915_reset_in_progress(error),
122                                                I915_RESET_TIMEOUT);
123         if (ret == 0) {
124                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
125                 return -EIO;
126         } else if (ret < 0) {
127                 return ret;
128         } else {
129                 return 0;
130         }
131 }
132
133 int i915_mutex_lock_interruptible(struct drm_device *dev)
134 {
135         struct drm_i915_private *dev_priv = to_i915(dev);
136         int ret;
137
138         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
139         if (ret)
140                 return ret;
141
142         ret = mutex_lock_interruptible(&dev->struct_mutex);
143         if (ret)
144                 return ret;
145
146         return 0;
147 }
148
149 int
150 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
151                             struct drm_file *file)
152 {
153         struct drm_i915_private *dev_priv = to_i915(dev);
154         struct i915_ggtt *ggtt = &dev_priv->ggtt;
155         struct drm_i915_gem_get_aperture *args = data;
156         struct i915_vma *vma;
157         size_t pinned;
158
159         pinned = 0;
160         mutex_lock(&dev->struct_mutex);
161         list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
162                 if (i915_vma_is_pinned(vma))
163                         pinned += vma->node.size;
164         list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
165                 if (i915_vma_is_pinned(vma))
166                         pinned += vma->node.size;
167         mutex_unlock(&dev->struct_mutex);
168
169         args->aper_size = ggtt->base.total;
170         args->aper_available_size = args->aper_size - pinned;
171
172         return 0;
173 }
174
175 static struct sg_table *
176 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
177 {
178         struct address_space *mapping = obj->base.filp->f_mapping;
179         drm_dma_handle_t *phys;
180         struct sg_table *st;
181         struct scatterlist *sg;
182         char *vaddr;
183         int i;
184
185         if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
186                 return ERR_PTR(-EINVAL);
187
188         /* Always aligning to the object size allows a single allocation
189          * to handle all possible callers, and given typical object sizes,
190          * the alignment of the buddy allocation will naturally match.
191          */
192         phys = drm_pci_alloc(obj->base.dev,
193                              obj->base.size,
194                              roundup_pow_of_two(obj->base.size));
195         if (!phys)
196                 return ERR_PTR(-ENOMEM);
197
198         vaddr = phys->vaddr;
199         for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
200                 struct page *page;
201                 char *src;
202
203                 page = shmem_read_mapping_page(mapping, i);
204                 if (IS_ERR(page)) {
205                         st = ERR_CAST(page);
206                         goto err_phys;
207                 }
208
209                 src = kmap_atomic(page);
210                 memcpy(vaddr, src, PAGE_SIZE);
211                 drm_clflush_virt_range(vaddr, PAGE_SIZE);
212                 kunmap_atomic(src);
213
214                 put_page(page);
215                 vaddr += PAGE_SIZE;
216         }
217
218         i915_gem_chipset_flush(to_i915(obj->base.dev));
219
220         st = kmalloc(sizeof(*st), GFP_KERNEL);
221         if (!st) {
222                 st = ERR_PTR(-ENOMEM);
223                 goto err_phys;
224         }
225
226         if (sg_alloc_table(st, 1, GFP_KERNEL)) {
227                 kfree(st);
228                 st = ERR_PTR(-ENOMEM);
229                 goto err_phys;
230         }
231
232         sg = st->sgl;
233         sg->offset = 0;
234         sg->length = obj->base.size;
235
236         sg_dma_address(sg) = phys->busaddr;
237         sg_dma_len(sg) = obj->base.size;
238
239         obj->phys_handle = phys;
240         return st;
241
242 err_phys:
243         drm_pci_free(obj->base.dev, phys);
244         return st;
245 }
246
247 static void
248 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
249                                 struct sg_table *pages)
250 {
251         GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
252
253         if (obj->mm.madv == I915_MADV_DONTNEED)
254                 obj->mm.dirty = false;
255
256         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
257             !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
258                 drm_clflush_sg(pages);
259
260         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
261         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
262 }
263
264 static void
265 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
266                                struct sg_table *pages)
267 {
268         __i915_gem_object_release_shmem(obj, pages);
269
270         if (obj->mm.dirty) {
271                 struct address_space *mapping = obj->base.filp->f_mapping;
272                 char *vaddr = obj->phys_handle->vaddr;
273                 int i;
274
275                 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
276                         struct page *page;
277                         char *dst;
278
279                         page = shmem_read_mapping_page(mapping, i);
280                         if (IS_ERR(page))
281                                 continue;
282
283                         dst = kmap_atomic(page);
284                         drm_clflush_virt_range(vaddr, PAGE_SIZE);
285                         memcpy(dst, vaddr, PAGE_SIZE);
286                         kunmap_atomic(dst);
287
288                         set_page_dirty(page);
289                         if (obj->mm.madv == I915_MADV_WILLNEED)
290                                 mark_page_accessed(page);
291                         put_page(page);
292                         vaddr += PAGE_SIZE;
293                 }
294                 obj->mm.dirty = false;
295         }
296
297         sg_free_table(pages);
298         kfree(pages);
299
300         drm_pci_free(obj->base.dev, obj->phys_handle);
301 }
302
303 static void
304 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
305 {
306         i915_gem_object_unpin_pages(obj);
307 }
308
309 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
310         .get_pages = i915_gem_object_get_pages_phys,
311         .put_pages = i915_gem_object_put_pages_phys,
312         .release = i915_gem_object_release_phys,
313 };
314
315 int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
316 {
317         struct i915_vma *vma;
318         LIST_HEAD(still_in_list);
319         int ret;
320
321         lockdep_assert_held(&obj->base.dev->struct_mutex);
322
323         /* Closed vma are removed from the obj->vma_list - but they may
324          * still have an active binding on the object. To remove those we
325          * must wait for all rendering to complete to the object (as unbinding
326          * must anyway), and retire the requests.
327          */
328         ret = i915_gem_object_wait(obj,
329                                    I915_WAIT_INTERRUPTIBLE |
330                                    I915_WAIT_LOCKED |
331                                    I915_WAIT_ALL,
332                                    MAX_SCHEDULE_TIMEOUT,
333                                    NULL);
334         if (ret)
335                 return ret;
336
337         i915_gem_retire_requests(to_i915(obj->base.dev));
338
339         while ((vma = list_first_entry_or_null(&obj->vma_list,
340                                                struct i915_vma,
341                                                obj_link))) {
342                 list_move_tail(&vma->obj_link, &still_in_list);
343                 ret = i915_vma_unbind(vma);
344                 if (ret)
345                         break;
346         }
347         list_splice(&still_in_list, &obj->vma_list);
348
349         return ret;
350 }
351
352 static long
353 i915_gem_object_wait_fence(struct dma_fence *fence,
354                            unsigned int flags,
355                            long timeout,
356                            struct intel_rps_client *rps)
357 {
358         struct drm_i915_gem_request *rq;
359
360         BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
361
362         if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
363                 return timeout;
364
365         if (!dma_fence_is_i915(fence))
366                 return dma_fence_wait_timeout(fence,
367                                               flags & I915_WAIT_INTERRUPTIBLE,
368                                               timeout);
369
370         rq = to_request(fence);
371         if (i915_gem_request_completed(rq))
372                 goto out;
373
374         /* This client is about to stall waiting for the GPU. In many cases
375          * this is undesirable and limits the throughput of the system, as
376          * many clients cannot continue processing user input/output whilst
377          * blocked. RPS autotuning may take tens of milliseconds to respond
378          * to the GPU load and thus incurs additional latency for the client.
379          * We can circumvent that by promoting the GPU frequency to maximum
380          * before we wait. This makes the GPU throttle up much more quickly
381          * (good for benchmarks and user experience, e.g. window animations),
382          * but at a cost of spending more power processing the workload
383          * (bad for battery). Not all clients even want their results
384          * immediately and for them we should just let the GPU select its own
385          * frequency to maximise efficiency. To prevent a single client from
386          * forcing the clocks too high for the whole system, we only allow
387          * each client to waitboost once in a busy period.
388          */
389         if (rps) {
390                 if (INTEL_GEN(rq->i915) >= 6)
391                         gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
392                 else
393                         rps = NULL;
394         }
395
396         timeout = i915_wait_request(rq, flags, timeout);
397
398 out:
399         if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
400                 i915_gem_request_retire_upto(rq);
401
402         if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) {
403                 /* The GPU is now idle and this client has stalled.
404                  * Since no other client has submitted a request in the
405                  * meantime, assume that this client is the only one
406                  * supplying work to the GPU but is unable to keep that
407                  * work supplied because it is waiting. Since the GPU is
408                  * then never kept fully busy, RPS autoclocking will
409                  * keep the clocks relatively low, causing further delays.
410                  * Compensate by giving the synchronous client credit for
411                  * a waitboost next time.
412                  */
413                 spin_lock(&rq->i915->rps.client_lock);
414                 list_del_init(&rps->link);
415                 spin_unlock(&rq->i915->rps.client_lock);
416         }
417
418         return timeout;
419 }
420
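/* Wait on the fences tracked by the reservation object: every shared fence
 * plus the exclusive fence when I915_WAIT_ALL is set, otherwise only the
 * exclusive fence. The remaining timeout is returned; it is <= 0 if a wait
 * failed or expired.
 */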
421 static long
422 i915_gem_object_wait_reservation(struct reservation_object *resv,
423                                  unsigned int flags,
424                                  long timeout,
425                                  struct intel_rps_client *rps)
426 {
427         struct dma_fence *excl;
428
429         if (flags & I915_WAIT_ALL) {
430                 struct dma_fence **shared;
431                 unsigned int count, i;
432                 int ret;
433
434                 ret = reservation_object_get_fences_rcu(resv,
435                                                         &excl, &count, &shared);
436                 if (ret)
437                         return ret;
438
439                 for (i = 0; i < count; i++) {
440                         timeout = i915_gem_object_wait_fence(shared[i],
441                                                              flags, timeout,
442                                                              rps);
443                         if (timeout <= 0)
444                                 break;
445
446                         dma_fence_put(shared[i]);
447                 }
448
449                 for (; i < count; i++)
450                         dma_fence_put(shared[i]);
451                 kfree(shared);
452         } else {
453                 excl = reservation_object_get_excl_rcu(resv);
454         }
455
456         if (excl && timeout > 0)
457                 timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
458
459         dma_fence_put(excl);
460
461         return timeout;
462 }
463
464 static void __fence_set_priority(struct dma_fence *fence, int prio)
465 {
466         struct drm_i915_gem_request *rq;
467         struct intel_engine_cs *engine;
468
469         if (!dma_fence_is_i915(fence))
470                 return;
471
472         rq = to_request(fence);
473         engine = rq->engine;
474         if (!engine->schedule)
475                 return;
476
477         engine->schedule(rq, prio);
478 }
479
480 static void fence_set_priority(struct dma_fence *fence, int prio)
481 {
482         /* Recurse once into a fence-array */
483         if (dma_fence_is_array(fence)) {
484                 struct dma_fence_array *array = to_dma_fence_array(fence);
485                 int i;
486
487                 for (i = 0; i < array->num_fences; i++)
488                         __fence_set_priority(array->fences[i], prio);
489         } else {
490                 __fence_set_priority(fence, prio);
491         }
492 }
493
494 int
495 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
496                               unsigned int flags,
497                               int prio)
498 {
499         struct dma_fence *excl;
500
501         if (flags & I915_WAIT_ALL) {
502                 struct dma_fence **shared;
503                 unsigned int count, i;
504                 int ret;
505
506                 ret = reservation_object_get_fences_rcu(obj->resv,
507                                                         &excl, &count, &shared);
508                 if (ret)
509                         return ret;
510
511                 for (i = 0; i < count; i++) {
512                         fence_set_priority(shared[i], prio);
513                         dma_fence_put(shared[i]);
514                 }
515
516                 kfree(shared);
517         } else {
518                 excl = reservation_object_get_excl_rcu(obj->resv);
519         }
520
521         if (excl) {
522                 fence_set_priority(excl, prio);
523                 dma_fence_put(excl);
524         }
525         return 0;
526 }
527
528 /**
529  * Waits for rendering to the object to be completed
530  * @obj: i915 gem object
531  * @flags: how to wait (under a lock, for all rendering or just for writes etc)
532  * @timeout: how long to wait
533  * @rps: client (user process) to charge for any waitboosting
534  */
535 int
536 i915_gem_object_wait(struct drm_i915_gem_object *obj,
537                      unsigned int flags,
538                      long timeout,
539                      struct intel_rps_client *rps)
540 {
541         might_sleep();
542 #if IS_ENABLED(CONFIG_LOCKDEP)
543         GEM_BUG_ON(debug_locks &&
544                    !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
545                    !!(flags & I915_WAIT_LOCKED));
546 #endif
547         GEM_BUG_ON(timeout < 0);
548
549         timeout = i915_gem_object_wait_reservation(obj->resv,
550                                                    flags, timeout,
551                                                    rps);
552         return timeout < 0 ? timeout : 0;
553 }
554
555 static struct intel_rps_client *to_rps_client(struct drm_file *file)
556 {
557         struct drm_i915_file_private *fpriv = file->driver_priv;
558
559         return &fpriv->rps;
560 }
561
562 int
563 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
564                             int align)
565 {
566         int ret;
567
568         if (align > obj->base.size)
569                 return -EINVAL;
570
571         if (obj->ops == &i915_gem_phys_ops)
572                 return 0;
573
574         if (obj->mm.madv != I915_MADV_WILLNEED)
575                 return -EFAULT;
576
577         if (obj->base.filp == NULL)
578                 return -EINVAL;
579
580         ret = i915_gem_object_unbind(obj);
581         if (ret)
582                 return ret;
583
584         __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
585         if (obj->mm.pages)
586                 return -EBUSY;
587
588         obj->ops = &i915_gem_phys_ops;
589
590         return i915_gem_object_pin_pages(obj);
591 }
592
593 static int
594 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
595                      struct drm_i915_gem_pwrite *args,
596                      struct drm_file *file)
597 {
598         struct drm_device *dev = obj->base.dev;
599         void *vaddr = obj->phys_handle->vaddr + args->offset;
600         char __user *user_data = u64_to_user_ptr(args->data_ptr);
601         int ret;
602
603         /* We manually control the domain here and pretend that it
604          * remains coherent i.e. in the GTT domain, like shmem_pwrite.
605          */
606         lockdep_assert_held(&obj->base.dev->struct_mutex);
607         ret = i915_gem_object_wait(obj,
608                                    I915_WAIT_INTERRUPTIBLE |
609                                    I915_WAIT_LOCKED |
610                                    I915_WAIT_ALL,
611                                    MAX_SCHEDULE_TIMEOUT,
612                                    to_rps_client(file));
613         if (ret)
614                 return ret;
615
616         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
617         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
618                 unsigned long unwritten;
619
620                 /* The physical object once assigned is fixed for the lifetime
621                  * of the obj, so we can safely drop the lock and continue
622                  * to access vaddr.
623                  */
624                 mutex_unlock(&dev->struct_mutex);
625                 unwritten = copy_from_user(vaddr, user_data, args->size);
626                 mutex_lock(&dev->struct_mutex);
627                 if (unwritten) {
628                         ret = -EFAULT;
629                         goto out;
630                 }
631         }
632
633         drm_clflush_virt_range(vaddr, args->size);
634         i915_gem_chipset_flush(to_i915(dev));
635
636 out:
637         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
638         return ret;
639 }
640
641 void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
642 {
643         return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
644 }
645
646 void i915_gem_object_free(struct drm_i915_gem_object *obj)
647 {
648         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
649         kmem_cache_free(dev_priv->objects, obj);
650 }
651
652 static int
653 i915_gem_create(struct drm_file *file,
654                 struct drm_i915_private *dev_priv,
655                 uint64_t size,
656                 uint32_t *handle_p)
657 {
658         struct drm_i915_gem_object *obj;
659         int ret;
660         u32 handle;
661
662         size = roundup(size, PAGE_SIZE);
663         if (size == 0)
664                 return -EINVAL;
665
666         /* Allocate the new object */
667         obj = i915_gem_object_create(dev_priv, size);
668         if (IS_ERR(obj))
669                 return PTR_ERR(obj);
670
671         ret = drm_gem_handle_create(file, &obj->base, &handle);
672         /* drop reference from allocate - handle holds it now */
673         i915_gem_object_put(obj);
674         if (ret)
675                 return ret;
676
677         *handle_p = handle;
678         return 0;
679 }
680
681 int
682 i915_gem_dumb_create(struct drm_file *file,
683                      struct drm_device *dev,
684                      struct drm_mode_create_dumb *args)
685 {
686         /* have to work out size/pitch and return them */
687         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
688         args->size = args->pitch * args->height;
689         return i915_gem_create(file, to_i915(dev),
690                                args->size, &args->handle);
691 }
692
693 /**
694  * Creates a new mm object and returns a handle to it.
695  * @dev: drm device pointer
696  * @data: ioctl data blob
697  * @file: drm file pointer
698  */
699 int
700 i915_gem_create_ioctl(struct drm_device *dev, void *data,
701                       struct drm_file *file)
702 {
703         struct drm_i915_private *dev_priv = to_i915(dev);
704         struct drm_i915_gem_create *args = data;
705
706         i915_gem_flush_free_objects(dev_priv);
707
708         return i915_gem_create(file, dev_priv,
709                                args->size, &args->handle);
710 }
711
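/* Copy out of a bit-17-swizzled object, one 64-byte cacheline at a time.
 * On affected pages the two 64-byte halves of every 128-byte block are
 * swapped, which is undone here by XORing the offset with 64.
 */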
712 static inline int
713 __copy_to_user_swizzled(char __user *cpu_vaddr,
714                         const char *gpu_vaddr, int gpu_offset,
715                         int length)
716 {
717         int ret, cpu_offset = 0;
718
719         while (length > 0) {
720                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
721                 int this_length = min(cacheline_end - gpu_offset, length);
722                 int swizzled_gpu_offset = gpu_offset ^ 64;
723
724                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
725                                      gpu_vaddr + swizzled_gpu_offset,
726                                      this_length);
727                 if (ret)
728                         return ret + length;
729
730                 cpu_offset += this_length;
731                 gpu_offset += this_length;
732                 length -= this_length;
733         }
734
735         return 0;
736 }
737
738 static inline int
739 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
740                           const char __user *cpu_vaddr,
741                           int length)
742 {
743         int ret, cpu_offset = 0;
744
745         while (length > 0) {
746                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
747                 int this_length = min(cacheline_end - gpu_offset, length);
748                 int swizzled_gpu_offset = gpu_offset ^ 64;
749
750                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
751                                        cpu_vaddr + cpu_offset,
752                                        this_length);
753                 if (ret)
754                         return ret + length;
755
756                 cpu_offset += this_length;
757                 gpu_offset += this_length;
758                 length -= this_length;
759         }
760
761         return 0;
762 }
763
764 /*
765  * Pins the specified object's pages and synchronizes the object with
766  * GPU accesses. Sets needs_clflush to non-zero if the caller should
767  * flush the object from the CPU cache.
768  */
769 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
770                                     unsigned int *needs_clflush)
771 {
772         int ret;
773
774         lockdep_assert_held(&obj->base.dev->struct_mutex);
775
776         *needs_clflush = 0;
777         if (!i915_gem_object_has_struct_page(obj))
778                 return -ENODEV;
779
780         ret = i915_gem_object_wait(obj,
781                                    I915_WAIT_INTERRUPTIBLE |
782                                    I915_WAIT_LOCKED,
783                                    MAX_SCHEDULE_TIMEOUT,
784                                    NULL);
785         if (ret)
786                 return ret;
787
788         ret = i915_gem_object_pin_pages(obj);
789         if (ret)
790                 return ret;
791
792         i915_gem_object_flush_gtt_write_domain(obj);
793
794         /* If we're not in the cpu read domain, set ourself into the gtt
795          * read domain and manually flush cachelines (if required). This
796          * optimizes for the case when the gpu will dirty the data
797          * anyway again before the next pread happens.
798          */
799         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
800                 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
801                                                         obj->cache_level);
802
803         if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
804                 ret = i915_gem_object_set_to_cpu_domain(obj, false);
805                 if (ret)
806                         goto err_unpin;
807
808                 *needs_clflush = 0;
809         }
810
811         /* return with the pages pinned */
812         return 0;
813
814 err_unpin:
815         i915_gem_object_unpin_pages(obj);
816         return ret;
817 }
818
819 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
820                                      unsigned int *needs_clflush)
821 {
822         int ret;
823
824         lockdep_assert_held(&obj->base.dev->struct_mutex);
825
826         *needs_clflush = 0;
827         if (!i915_gem_object_has_struct_page(obj))
828                 return -ENODEV;
829
830         ret = i915_gem_object_wait(obj,
831                                    I915_WAIT_INTERRUPTIBLE |
832                                    I915_WAIT_LOCKED |
833                                    I915_WAIT_ALL,
834                                    MAX_SCHEDULE_TIMEOUT,
835                                    NULL);
836         if (ret)
837                 return ret;
838
839         ret = i915_gem_object_pin_pages(obj);
840         if (ret)
841                 return ret;
842
843         i915_gem_object_flush_gtt_write_domain(obj);
844
845         /* If we're not in the cpu write domain, set ourself into the
846          * gtt write domain and manually flush cachelines (as required).
847          * This optimizes for the case when the gpu will use the data
848          * right away and we therefore have to clflush anyway.
849          */
850         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
851                 *needs_clflush |= cpu_write_needs_clflush(obj) << 1;
852
853         /* Same trick applies to invalidate partially written cachelines read
854          * before writing.
855          */
856         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
857                 *needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
858                                                          obj->cache_level);
859
860         if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
861                 ret = i915_gem_object_set_to_cpu_domain(obj, true);
862                 if (ret)
863                         goto err_unpin;
864
865                 *needs_clflush = 0;
866         }
867
868         if ((*needs_clflush & CLFLUSH_AFTER) == 0)
869                 obj->cache_dirty = true;
870
871         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
872         obj->mm.dirty = true;
873         /* return with the pages pinned */
874         return 0;
875
876 err_unpin:
877         i915_gem_object_unpin_pages(obj);
878         return ret;
879 }
880
881 static void
882 shmem_clflush_swizzled_range(char *addr, unsigned long length,
883                              bool swizzled)
884 {
885         if (unlikely(swizzled)) {
886                 unsigned long start = (unsigned long) addr;
887                 unsigned long end = (unsigned long) addr + length;
888
889                 /* For swizzling simply ensure that we always flush both
890                  * channels. Lame, but simple and it works. Swizzled
891                  * pwrite/pread is far from a hotpath - current userspace
892                  * doesn't use it at all. */
893                 start = round_down(start, 128);
894                 end = round_up(end, 128);
895
896                 drm_clflush_virt_range((void *)start, end - start);
897         } else {
898                 drm_clflush_virt_range(addr, length);
899         }
900
901 }
902
903 /* The only difference from the fast-path function is that this one can handle
904  * bit17 swizzling and uses non-atomic copy and kmap functions. */
905 static int
906 shmem_pread_slow(struct page *page, int offset, int length,
907                  char __user *user_data,
908                  bool page_do_bit17_swizzling, bool needs_clflush)
909 {
910         char *vaddr;
911         int ret;
912
913         vaddr = kmap(page);
914         if (needs_clflush)
915                 shmem_clflush_swizzled_range(vaddr + offset, length,
916                                              page_do_bit17_swizzling);
917
918         if (page_do_bit17_swizzling)
919                 ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
920         else
921                 ret = __copy_to_user(user_data, vaddr + offset, length);
922         kunmap(page);
923
924         return ret ? -EFAULT : 0;
925 }
926
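/* Fast path for reading a single shmem page: atomic kmap and a non-faulting
 * copy to userspace, falling back to the sleeping slow path above when the
 * copy faults or the page needs bit17 unswizzling.
 */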
927 static int
928 shmem_pread(struct page *page, int offset, int length, char __user *user_data,
929             bool page_do_bit17_swizzling, bool needs_clflush)
930 {
931         int ret;
932
933         ret = -ENODEV;
934         if (!page_do_bit17_swizzling) {
935                 char *vaddr = kmap_atomic(page);
936
937                 if (needs_clflush)
938                         drm_clflush_virt_range(vaddr + offset, length);
939                 ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
940                 kunmap_atomic(vaddr);
941         }
942         if (ret == 0)
943                 return 0;
944
945         return shmem_pread_slow(page, offset, length, user_data,
946                                 page_do_bit17_swizzling, needs_clflush);
947 }
948
949 static int
950 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
951                      struct drm_i915_gem_pread *args)
952 {
953         char __user *user_data;
954         u64 remain;
955         unsigned int obj_do_bit17_swizzling;
956         unsigned int needs_clflush;
957         unsigned int idx, offset;
958         int ret;
959
960         obj_do_bit17_swizzling = 0;
961         if (i915_gem_object_needs_bit17_swizzle(obj))
962                 obj_do_bit17_swizzling = BIT(17);
963
964         ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
965         if (ret)
966                 return ret;
967
968         ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
969         mutex_unlock(&obj->base.dev->struct_mutex);
970         if (ret)
971                 return ret;
972
973         remain = args->size;
974         user_data = u64_to_user_ptr(args->data_ptr);
975         offset = offset_in_page(args->offset);
976         for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
977                 struct page *page = i915_gem_object_get_page(obj, idx);
978                 int length;
979
980                 length = remain;
981                 if (offset + length > PAGE_SIZE)
982                         length = PAGE_SIZE - offset;
983
984                 ret = shmem_pread(page, offset, length, user_data,
985                                   page_to_phys(page) & obj_do_bit17_swizzling,
986                                   needs_clflush);
987                 if (ret)
988                         break;
989
990                 remain -= length;
991                 user_data += length;
992                 offset = 0;
993         }
994
995         i915_gem_obj_finish_shmem_access(obj);
996         return ret;
997 }
998
999 static inline bool
1000 gtt_user_read(struct io_mapping *mapping,
1001               loff_t base, int offset,
1002               char __user *user_data, int length)
1003 {
1004         void *vaddr;
1005         unsigned long unwritten;
1006
1007         /* We can use the cpu mem copy function because this is X86. */
1008         vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
1009         unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
1010         io_mapping_unmap_atomic(vaddr);
1011         if (unwritten) {
1012                 vaddr = (void __force *)
1013                         io_mapping_map_wc(mapping, base, PAGE_SIZE);
1014                 unwritten = copy_to_user(user_data, vaddr + offset, length);
1015                 io_mapping_unmap(vaddr);
1016         }
1017         return unwritten;
1018 }
1019
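/* Fallback pread that reads through the write-combined GGTT aperture, used
 * when the shmem path is unavailable (no struct pages) or faults. If the
 * whole object cannot be pinned in the mappable region, a single page is
 * inserted into the aperture at a time.
 */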
1020 static int
1021 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
1022                    const struct drm_i915_gem_pread *args)
1023 {
1024         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1025         struct i915_ggtt *ggtt = &i915->ggtt;
1026         struct drm_mm_node node;
1027         struct i915_vma *vma;
1028         void __user *user_data;
1029         u64 remain, offset;
1030         int ret;
1031
1032         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1033         if (ret)
1034                 return ret;
1035
1036         intel_runtime_pm_get(i915);
1037         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1038                                        PIN_MAPPABLE | PIN_NONBLOCK);
1039         if (!IS_ERR(vma)) {
1040                 node.start = i915_ggtt_offset(vma);
1041                 node.allocated = false;
1042                 ret = i915_vma_put_fence(vma);
1043                 if (ret) {
1044                         i915_vma_unpin(vma);
1045                         vma = ERR_PTR(ret);
1046                 }
1047         }
1048         if (IS_ERR(vma)) {
1049                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1050                 if (ret)
1051                         goto out_unlock;
1052                 GEM_BUG_ON(!node.allocated);
1053         }
1054
1055         ret = i915_gem_object_set_to_gtt_domain(obj, false);
1056         if (ret)
1057                 goto out_unpin;
1058
1059         mutex_unlock(&i915->drm.struct_mutex);
1060
1061         user_data = u64_to_user_ptr(args->data_ptr);
1062         remain = args->size;
1063         offset = args->offset;
1064
1065         while (remain > 0) {
1066                 /* Operation in this page
1067                  *
1068                  * page_base = page offset within aperture
1069                  * page_offset = offset within page
1070                  * page_length = bytes to copy for this page
1071                  */
1072                 u32 page_base = node.start;
1073                 unsigned page_offset = offset_in_page(offset);
1074                 unsigned page_length = PAGE_SIZE - page_offset;
1075                 page_length = remain < page_length ? remain : page_length;
1076                 if (node.allocated) {
1077                         wmb();
1078                         ggtt->base.insert_page(&ggtt->base,
1079                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1080                                                node.start, I915_CACHE_NONE, 0);
1081                         wmb();
1082                 } else {
1083                         page_base += offset & PAGE_MASK;
1084                 }
1085
1086                 if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
1087                                   user_data, page_length)) {
1088                         ret = -EFAULT;
1089                         break;
1090                 }
1091
1092                 remain -= page_length;
1093                 user_data += page_length;
1094                 offset += page_length;
1095         }
1096
1097         mutex_lock(&i915->drm.struct_mutex);
1098 out_unpin:
1099         if (node.allocated) {
1100                 wmb();
1101                 ggtt->base.clear_range(&ggtt->base,
1102                                        node.start, node.size);
1103                 remove_mappable_node(&node);
1104         } else {
1105                 i915_vma_unpin(vma);
1106         }
1107 out_unlock:
1108         intel_runtime_pm_put(i915);
1109         mutex_unlock(&i915->drm.struct_mutex);
1110
1111         return ret;
1112 }
1113
1114 /**
1115  * Reads data from the object referenced by handle.
1116  * @dev: drm device pointer
1117  * @data: ioctl data blob
1118  * @file: drm file pointer
1119  *
1120  * On error, the contents of *data are undefined.
1121  */
1122 int
1123 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1124                      struct drm_file *file)
1125 {
1126         struct drm_i915_gem_pread *args = data;
1127         struct drm_i915_gem_object *obj;
1128         int ret;
1129
1130         if (args->size == 0)
1131                 return 0;
1132
1133         if (!access_ok(VERIFY_WRITE,
1134                        u64_to_user_ptr(args->data_ptr),
1135                        args->size))
1136                 return -EFAULT;
1137
1138         obj = i915_gem_object_lookup(file, args->handle);
1139         if (!obj)
1140                 return -ENOENT;
1141
1142         /* Bounds check source.  */
1143         if (args->offset > obj->base.size ||
1144             args->size > obj->base.size - args->offset) {
1145                 ret = -EINVAL;
1146                 goto out;
1147         }
1148
1149         trace_i915_gem_object_pread(obj, args->offset, args->size);
1150
1151         ret = i915_gem_object_wait(obj,
1152                                    I915_WAIT_INTERRUPTIBLE,
1153                                    MAX_SCHEDULE_TIMEOUT,
1154                                    to_rps_client(file));
1155         if (ret)
1156                 goto out;
1157
1158         ret = i915_gem_object_pin_pages(obj);
1159         if (ret)
1160                 goto out;
1161
1162         ret = i915_gem_shmem_pread(obj, args);
1163         if (ret == -EFAULT || ret == -ENODEV)
1164                 ret = i915_gem_gtt_pread(obj, args);
1165
1166         i915_gem_object_unpin_pages(obj);
1167 out:
1168         i915_gem_object_put(obj);
1169         return ret;
1170 }
1171
1172 /* This is the fast write path which cannot handle
1173  * page faults in the source data
1174  */
1175
1176 static inline bool
1177 ggtt_write(struct io_mapping *mapping,
1178            loff_t base, int offset,
1179            char __user *user_data, int length)
1180 {
1181         void *vaddr;
1182         unsigned long unwritten;
1183
1184         /* We can use the cpu mem copy function because this is X86. */
1185         vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
1186         unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
1187                                                       user_data, length);
1188         io_mapping_unmap_atomic(vaddr);
1189         if (unwritten) {
1190                 vaddr = (void __force *)
1191                         io_mapping_map_wc(mapping, base, PAGE_SIZE);
1192                 unwritten = copy_from_user(vaddr + offset, user_data, length);
1193                 io_mapping_unmap(vaddr);
1194         }
1195
1196         return unwritten;
1197 }
1198
1199 /**
1200  * This is the fast pwrite path, where we copy the data directly from the
1201  * user into the GTT, uncached.
1202  * @obj: i915 GEM object
1203  * @args: pwrite arguments structure
1204  */
1205 static int
1206 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
1207                          const struct drm_i915_gem_pwrite *args)
1208 {
1209         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1210         struct i915_ggtt *ggtt = &i915->ggtt;
1211         struct drm_mm_node node;
1212         struct i915_vma *vma;
1213         u64 remain, offset;
1214         void __user *user_data;
1215         int ret;
1216
1217         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1218         if (ret)
1219                 return ret;
1220
1221         intel_runtime_pm_get(i915);
1222         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1223                                        PIN_MAPPABLE | PIN_NONBLOCK);
1224         if (!IS_ERR(vma)) {
1225                 node.start = i915_ggtt_offset(vma);
1226                 node.allocated = false;
1227                 ret = i915_vma_put_fence(vma);
1228                 if (ret) {
1229                         i915_vma_unpin(vma);
1230                         vma = ERR_PTR(ret);
1231                 }
1232         }
1233         if (IS_ERR(vma)) {
1234                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1235                 if (ret)
1236                         goto out_unlock;
1237                 GEM_BUG_ON(!node.allocated);
1238         }
1239
1240         ret = i915_gem_object_set_to_gtt_domain(obj, true);
1241         if (ret)
1242                 goto out_unpin;
1243
1244         mutex_unlock(&i915->drm.struct_mutex);
1245
1246         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1247
1248         user_data = u64_to_user_ptr(args->data_ptr);
1249         offset = args->offset;
1250         remain = args->size;
1251         while (remain) {
1252                 /* Operation in this page
1253                  *
1254                  * page_base = page offset within aperture
1255                  * page_offset = offset within page
1256                  * page_length = bytes to copy for this page
1257                  */
1258                 u32 page_base = node.start;
1259                 unsigned int page_offset = offset_in_page(offset);
1260                 unsigned int page_length = PAGE_SIZE - page_offset;
1261                 page_length = remain < page_length ? remain : page_length;
1262                 if (node.allocated) {
1263                         wmb(); /* flush the write before we modify the GGTT */
1264                         ggtt->base.insert_page(&ggtt->base,
1265                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1266                                                node.start, I915_CACHE_NONE, 0);
1267                         wmb(); /* flush modifications to the GGTT (insert_page) */
1268                 } else {
1269                         page_base += offset & PAGE_MASK;
1270                 }
1271                 /* If we get a fault while copying data, then (presumably) our
1272                  * source page isn't available.  Return the error and we'll
1273                  * retry in the slow path.
1274                  * If the object is non-shmem backed, we retry again with the
1275                  * path that handles page fault.
1276                  */
1277                 if (ggtt_write(&ggtt->mappable, page_base, page_offset,
1278                                user_data, page_length)) {
1279                         ret = -EFAULT;
1280                         break;
1281                 }
1282
1283                 remain -= page_length;
1284                 user_data += page_length;
1285                 offset += page_length;
1286         }
1287         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1288
1289         mutex_lock(&i915->drm.struct_mutex);
1290 out_unpin:
1291         if (node.allocated) {
1292                 wmb();
1293                 ggtt->base.clear_range(&ggtt->base,
1294                                        node.start, node.size);
1295                 remove_mappable_node(&node);
1296         } else {
1297                 i915_vma_unpin(vma);
1298         }
1299 out_unlock:
1300         intel_runtime_pm_put(i915);
1301         mutex_unlock(&i915->drm.struct_mutex);
1302         return ret;
1303 }
1304
1305 static int
1306 shmem_pwrite_slow(struct page *page, int offset, int length,
1307                   char __user *user_data,
1308                   bool page_do_bit17_swizzling,
1309                   bool needs_clflush_before,
1310                   bool needs_clflush_after)
1311 {
1312         char *vaddr;
1313         int ret;
1314
1315         vaddr = kmap(page);
1316         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1317                 shmem_clflush_swizzled_range(vaddr + offset, length,
1318                                              page_do_bit17_swizzling);
1319         if (page_do_bit17_swizzling)
1320                 ret = __copy_from_user_swizzled(vaddr, offset, user_data,
1321                                                 length);
1322         else
1323                 ret = __copy_from_user(vaddr + offset, user_data, length);
1324         if (needs_clflush_after)
1325                 shmem_clflush_swizzled_range(vaddr + offset, length,
1326                                              page_do_bit17_swizzling);
1327         kunmap(page);
1328
1329         return ret ? -EFAULT : 0;
1330 }
1331
1332 /* Per-page copy function for the shmem pwrite fastpath.
1333  * Flushes invalid cachelines before writing to the target if
1334  * needs_clflush_before is set and flushes out any written cachelines after
1335  * writing if needs_clflush is set.
1336  */
1337 static int
1338 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
1339              bool page_do_bit17_swizzling,
1340              bool needs_clflush_before,
1341              bool needs_clflush_after)
1342 {
1343         int ret;
1344
1345         ret = -ENODEV;
1346         if (!page_do_bit17_swizzling) {
1347                 char *vaddr = kmap_atomic(page);
1348
1349                 if (needs_clflush_before)
1350                         drm_clflush_virt_range(vaddr + offset, len);
1351                 ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
1352                 if (needs_clflush_after)
1353                         drm_clflush_virt_range(vaddr + offset, len);
1354
1355                 kunmap_atomic(vaddr);
1356         }
1357         if (ret == 0)
1358                 return ret;
1359
1360         return shmem_pwrite_slow(page, offset, len, user_data,
1361                                  page_do_bit17_swizzling,
1362                                  needs_clflush_before,
1363                                  needs_clflush_after);
1364 }
1365
1366 static int
1367 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
1368                       const struct drm_i915_gem_pwrite *args)
1369 {
1370         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1371         void __user *user_data;
1372         u64 remain;
1373         unsigned int obj_do_bit17_swizzling;
1374         unsigned int partial_cacheline_write;
1375         unsigned int needs_clflush;
1376         unsigned int offset, idx;
1377         int ret;
1378
1379         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1380         if (ret)
1381                 return ret;
1382
1383         ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1384         mutex_unlock(&i915->drm.struct_mutex);
1385         if (ret)
1386                 return ret;
1387
1388         obj_do_bit17_swizzling = 0;
1389         if (i915_gem_object_needs_bit17_swizzle(obj))
1390                 obj_do_bit17_swizzling = BIT(17);
1391
1392         /* If we don't overwrite a cacheline completely we need to be
1393          * careful to have up-to-date data by first clflushing. Don't
1394          * overcomplicate things and flush the entire patch.
1395          */
1396         partial_cacheline_write = 0;
1397         if (needs_clflush & CLFLUSH_BEFORE)
1398                 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
1399
1400         user_data = u64_to_user_ptr(args->data_ptr);
1401         remain = args->size;
1402         offset = offset_in_page(args->offset);
1403         for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1404                 struct page *page = i915_gem_object_get_page(obj, idx);
1405                 int length;
1406
1407                 length = remain;
1408                 if (offset + length > PAGE_SIZE)
1409                         length = PAGE_SIZE - offset;
1410
1411                 ret = shmem_pwrite(page, offset, length, user_data,
1412                                    page_to_phys(page) & obj_do_bit17_swizzling,
1413                                    (offset | length) & partial_cacheline_write,
1414                                    needs_clflush & CLFLUSH_AFTER);
1415                 if (ret)
1416                         break;
1417
1418                 remain -= length;
1419                 user_data += length;
1420                 offset = 0;
1421         }
1422
1423         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1424         i915_gem_obj_finish_shmem_access(obj);
1425         return ret;
1426 }
1427
1428 /**
1429  * Writes data to the object referenced by handle.
1430  * @dev: drm device
1431  * @data: ioctl data blob
1432  * @file: drm file
1433  *
1434  * On error, the contents of the buffer that were to be modified are undefined.
1435  */
1436 int
1437 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1438                       struct drm_file *file)
1439 {
1440         struct drm_i915_gem_pwrite *args = data;
1441         struct drm_i915_gem_object *obj;
1442         int ret;
1443
1444         if (args->size == 0)
1445                 return 0;
1446
1447         if (!access_ok(VERIFY_READ,
1448                        u64_to_user_ptr(args->data_ptr),
1449                        args->size))
1450                 return -EFAULT;
1451
1452         obj = i915_gem_object_lookup(file, args->handle);
1453         if (!obj)
1454                 return -ENOENT;
1455
1456         /* Bounds check destination. */
1457         if (args->offset > obj->base.size ||
1458             args->size > obj->base.size - args->offset) {
1459                 ret = -EINVAL;
1460                 goto err;
1461         }
1462
1463         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1464
1465         ret = i915_gem_object_wait(obj,
1466                                    I915_WAIT_INTERRUPTIBLE |
1467                                    I915_WAIT_ALL,
1468                                    MAX_SCHEDULE_TIMEOUT,
1469                                    to_rps_client(file));
1470         if (ret)
1471                 goto err;
1472
1473         ret = i915_gem_object_pin_pages(obj);
1474         if (ret)
1475                 goto err;
1476
1477         ret = -EFAULT;
1478         /* We can only do the GTT pwrite on untiled buffers, as otherwise
1479          * it would end up going through the fenced access, and we'll get
1480          * different detiling behavior between reading and writing.
1481          * pread/pwrite currently are reading and writing from the CPU
1482          * perspective, requiring manual detiling by the client.
1483          */
1484         if (!i915_gem_object_has_struct_page(obj) ||
1485             cpu_write_needs_clflush(obj))
1486                 /* Note that the gtt paths might fail with non-page-backed user
1487                  * pointers (e.g. gtt mappings when moving data between
1488                  * textures). Fallback to the shmem path in that case.
1489                  */
1490                 ret = i915_gem_gtt_pwrite_fast(obj, args);
1491
1492         if (ret == -EFAULT || ret == -ENOSPC) {
1493                 if (obj->phys_handle)
1494                         ret = i915_gem_phys_pwrite(obj, args, file);
1495                 else
1496                         ret = i915_gem_shmem_pwrite(obj, args);
1497         }
1498
1499         i915_gem_object_unpin_pages(obj);
1500 err:
1501         i915_gem_object_put(obj);
1502         return ret;
1503 }
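/*
 * A minimal userspace sketch of driving the pwrite ioctl above, assuming
 * "fd" is an open DRM node, "handle" a valid GEM handle and "buf" a local
 * buffer (all hypothetical):
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle   = handle,
 *		.offset   = 0,
 *		.size     = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite))
 *		perror("DRM_IOCTL_I915_GEM_PWRITE");
 */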
1504
1505 static inline enum fb_op_origin
1506 write_origin(struct drm_i915_gem_object *obj, unsigned domain)
1507 {
1508         return (domain == I915_GEM_DOMAIN_GTT ?
1509                 obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
1510 }
1511
1512 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
1513 {
1514         struct drm_i915_private *i915;
1515         struct list_head *list;
1516         struct i915_vma *vma;
1517
1518         list_for_each_entry(vma, &obj->vma_list, obj_link) {
1519                 if (!i915_vma_is_ggtt(vma))
1520                         continue;
1521
1522                 if (i915_vma_is_active(vma))
1523                         continue;
1524
1525                 if (!drm_mm_node_allocated(&vma->node))
1526                         continue;
1527
1528                 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
1529         }
1530
1531         i915 = to_i915(obj->base.dev);
1532         list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
1533         list_move_tail(&obj->global_link, list);
1534 }
1535
1536 /**
1537  * i915_gem_set_domain_ioctl - Called when user space prepares to use an object
1538  * with the CPU, either through the mmap ioctl's mapping or a GTT mapping.
1539  * @dev: drm device
1540  * @data: ioctl data blob
1541  * @file: drm file
1542  */
1543 int
1544 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1545                           struct drm_file *file)
1546 {
1547         struct drm_i915_gem_set_domain *args = data;
1548         struct drm_i915_gem_object *obj;
1549         uint32_t read_domains = args->read_domains;
1550         uint32_t write_domain = args->write_domain;
1551         int err;
1552
1553         /* Only handle setting domains to types used by the CPU. */
1554         if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1555                 return -EINVAL;
1556
1557         /* Having something in the write domain implies it's in the read
1558          * domain, and only that read domain.  Enforce that in the request.
1559          */
1560         if (write_domain != 0 && read_domains != write_domain)
1561                 return -EINVAL;
1562
1563         obj = i915_gem_object_lookup(file, args->handle);
1564         if (!obj)
1565                 return -ENOENT;
1566
1567         /* Try to flush the object off the GPU without holding the lock.
1568          * We will repeat the flush holding the lock in the normal manner
1569          * to catch cases where we are gazumped.
1570          */
1571         err = i915_gem_object_wait(obj,
1572                                    I915_WAIT_INTERRUPTIBLE |
1573                                    (write_domain ? I915_WAIT_ALL : 0),
1574                                    MAX_SCHEDULE_TIMEOUT,
1575                                    to_rps_client(file));
1576         if (err)
1577                 goto out;
1578
1579         /* Flush and acquire obj->pages so that we are coherent through
1580          * direct access in memory with previous cached writes through
1581          * shmemfs and that our cache domain tracking remains valid.
1582          * For example, if the obj->filp was moved to swap without us
1583          * being notified and releasing the pages, we would mistakenly
1584          * continue to assume that the obj remained out of the CPU cached
1585          * domain.
1586          */
1587         err = i915_gem_object_pin_pages(obj);
1588         if (err)
1589                 goto out;
1590
1591         err = i915_mutex_lock_interruptible(dev);
1592         if (err)
1593                 goto out_unpin;
1594
1595         if (read_domains & I915_GEM_DOMAIN_GTT)
1596                 err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1597         else
1598                 err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1599
1600         /* And bump the LRU for this access */
1601         i915_gem_object_bump_inactive_ggtt(obj);
1602
1603         mutex_unlock(&dev->struct_mutex);
1604
1605         if (write_domain != 0)
1606                 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
1607
1608 out_unpin:
1609         i915_gem_object_unpin_pages(obj);
1610 out:
1611         i915_gem_object_put(obj);
1612         return err;
1613 }
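/*
 * A minimal userspace sketch of the set-domain flow, assuming "fd" and
 * "handle" (hypothetical) are an open DRM node and a valid GEM handle:
 * move the object into the CPU domain for writing before dirtying it
 * through a CPU mmap.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle       = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */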
1614
1615 /**
1616  * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
1617  * @dev: drm device
1618  * @data: ioctl data blob
1619  * @file: drm file
1620  */
1621 int
1622 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1623                          struct drm_file *file)
1624 {
1625         struct drm_i915_gem_sw_finish *args = data;
1626         struct drm_i915_gem_object *obj;
1627         int err = 0;
1628
1629         obj = i915_gem_object_lookup(file, args->handle);
1630         if (!obj)
1631                 return -ENOENT;
1632
1633         /* Pinned buffers may be scanout, so flush the cache */
1634         if (READ_ONCE(obj->pin_display)) {
1635                 err = i915_mutex_lock_interruptible(dev);
1636                 if (!err) {
1637                         i915_gem_object_flush_cpu_write_domain(obj);
1638                         mutex_unlock(&dev->struct_mutex);
1639                 }
1640         }
1641
1642         i915_gem_object_put(obj);
1643         return err;
1644 }
1645
1646 /**
1647  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1648  *                       it is mapped to.
1649  * @dev: drm device
1650  * @data: ioctl data blob
1651  * @file: drm file
1652  *
1653  * While the mapping holds a reference on the contents of the object, it doesn't
1654  * imply a ref on the object itself.
1655  *
1656  * IMPORTANT:
1657  *
1658  * DRM driver writers who look at this function as an example of how to do GEM
1659  * mmap support: please don't implement mmap support like this. The modern way
1660  * to implement DRM mmap support is with an mmap offset ioctl (like
1661  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1662  * That way debug tooling like valgrind will understand what's going on; hiding
1663  * the mmap call in a driver-private ioctl will break that. The i915 driver only
1664  * does CPU mmaps this way because we didn't know better.
1665  */
1666 int
1667 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1668                     struct drm_file *file)
1669 {
1670         struct drm_i915_gem_mmap *args = data;
1671         struct drm_i915_gem_object *obj;
1672         unsigned long addr;
1673
1674         if (args->flags & ~(I915_MMAP_WC))
1675                 return -EINVAL;
1676
1677         if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1678                 return -ENODEV;
1679
1680         obj = i915_gem_object_lookup(file, args->handle);
1681         if (!obj)
1682                 return -ENOENT;
1683
1684         /* prime objects have no backing filp to GEM mmap
1685          * pages from.
1686          */
1687         if (!obj->base.filp) {
1688                 i915_gem_object_put(obj);
1689                 return -EINVAL;
1690         }
1691
1692         addr = vm_mmap(obj->base.filp, 0, args->size,
1693                        PROT_READ | PROT_WRITE, MAP_SHARED,
1694                        args->offset);
1695         if (args->flags & I915_MMAP_WC) {
1696                 struct mm_struct *mm = current->mm;
1697                 struct vm_area_struct *vma;
1698
1699                 if (down_write_killable(&mm->mmap_sem)) {
1700                         i915_gem_object_put(obj);
1701                         return -EINTR;
1702                 }
1703                 vma = find_vma(mm, addr);
1704                 if (vma)
1705                         vma->vm_page_prot =
1706                                 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1707                 else
1708                         addr = -ENOMEM;
1709                 up_write(&mm->mmap_sem);
1710
1711                 /* This may race, but that's ok, it only gets set */
1712                 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1713         }
1714         i915_gem_object_put(obj);
1715         if (IS_ERR((void *)addr))
1716                 return addr;
1717
1718         args->addr_ptr = (uint64_t) addr;
1719
1720         return 0;
1721 }
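/*
 * A minimal userspace sketch of this legacy CPU mmap path, assuming "fd",
 * "handle" and "obj_size" (hypothetical) describe an open DRM node and a
 * valid GEM object: request a write-combined mapping and read the CPU
 * address back from addr_ptr.
 *
 *	struct drm_i915_gem_mmap mmap_arg = {
 *		.handle = handle,
 *		.size   = obj_size,
 *		.flags  = I915_MMAP_WC,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) == 0)
 *		ptr = (void *)(uintptr_t)mmap_arg.addr_ptr;
 */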
1722
1723 static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
1724 {
1725         u64 size;
1726
1727         size = i915_gem_object_get_stride(obj);
1728         size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;
1729
1730         return size >> PAGE_SHIFT;
1731 }
1732
1733 /**
1734  * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
1735  *
1736  * A history of the GTT mmap interface:
1737  *
1738  * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
1739  *     be aligned and suitable for fencing, and still fit into the available
1740  *     mappable space left by the pinned display objects. A classic problem
1741  *     we called the page-fault-of-doom where we would ping-pong between
1742  *     two objects that could not fit inside the GTT and so the memcpy
1743  *     would page one object in at the expense of the other between every
1744  *     single byte.
1745  *
1746  * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none,
1747  *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
1748  *     object is too large for the available space (or simply too large
1749  *     for the mappable aperture!), a view is created instead and faulted
1750  *     into userspace. (This view is aligned and sized appropriately for
1751  *     fenced access.)
1752  *
1753  * Restrictions:
1754  *
1755  *  * snoopable objects cannot be accessed via the GTT. Doing so can cause machine
1756  *    hangs on some architectures, corruption on others. An attempt to service
1757  *    a GTT page fault from a snoopable object will generate a SIGBUS.
1758  *
1759  *  * the object must be able to fit into RAM (physical memory, though not
1760  *    limited to the mappable aperture).
1761  *
1762  *
1763  * Caveats:
1764  *
1765  *  * a new GTT page fault will synchronize rendering from the GPU and flush
1766  *    all data to system memory. Subsequent access will not be synchronized.
1767  *
1768  *  * all mappings are revoked on runtime device suspend.
1769  *
1770  *  * there are only 8, 16 or 32 fence registers to share between all users
1771  *    (older machines require a fence register for display and blitter access
1772  *    as well). Contention of the fence registers will cause the previous users
1773  *    to be unmapped and any new access will generate new page faults.
1774  *
1775  *  * running out of memory while servicing a fault may generate a SIGBUS,
1776  *    rather than the expected SIGSEGV.
1777  */
1778 int i915_gem_mmap_gtt_version(void)
1779 {
1780         return 1;
1781 }
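/*
 * Userspace can discover the feature level described above through the
 * getparam ioctl; a minimal sketch, assuming "fd" (hypothetical) is an
 * open DRM node:
 *
 *	int gtt_version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &gtt_version,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */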
1782
1783 /**
1784  * i915_gem_fault - fault a page into the GTT
1785  * @area: CPU VMA in question
1786  * @vmf: fault info
1787  *
1788  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1789  * from userspace.  The fault handler takes care of binding the object to
1790  * the GTT (if needed), allocating and programming a fence register (again,
1791  * only if needed based on whether the old reg is still valid or the object
1792  * is tiled) and inserting a new PTE into the faulting process.
1793  *
1794  * Note that the faulting process may involve evicting existing objects
1795  * from the GTT and/or fence registers to make room.  So performance may
1796  * suffer if the GTT working set is large or there are few fence registers
1797  * left.
1798  *
1799  * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1800  * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
1801  */
1802 int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
1803 {
1804 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
1805         struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
1806         struct drm_device *dev = obj->base.dev;
1807         struct drm_i915_private *dev_priv = to_i915(dev);
1808         struct i915_ggtt *ggtt = &dev_priv->ggtt;
1809         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1810         struct i915_vma *vma;
1811         pgoff_t page_offset;
1812         unsigned int flags;
1813         int ret;
1814
1815         /* We don't use vmf->pgoff since that has the fake offset */
1816         page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
1817                 PAGE_SHIFT;
1818
1819         trace_i915_gem_object_fault(obj, page_offset, true, write);
1820
1821         /* Try to flush the object off the GPU first without holding the lock.
1822          * Upon acquiring the lock, we will perform our sanity checks and then
1823          * repeat the flush holding the lock in the normal manner to catch cases
1824          * where we are gazumped.
1825          */
1826         ret = i915_gem_object_wait(obj,
1827                                    I915_WAIT_INTERRUPTIBLE,
1828                                    MAX_SCHEDULE_TIMEOUT,
1829                                    NULL);
1830         if (ret)
1831                 goto err;
1832
1833         ret = i915_gem_object_pin_pages(obj);
1834         if (ret)
1835                 goto err;
1836
1837         intel_runtime_pm_get(dev_priv);
1838
1839         ret = i915_mutex_lock_interruptible(dev);
1840         if (ret)
1841                 goto err_rpm;
1842
1843         /* Access to snoopable pages through the GTT is incoherent. */
1844         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
1845                 ret = -EFAULT;
1846                 goto err_unlock;
1847         }
1848
1849         /* If the object is smaller than a couple of partial vmas, it is
1850          * not worth only creating a single partial vma - we may as well
1851          * clear enough space for the full object.
1852          */
1853         flags = PIN_MAPPABLE;
1854         if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
1855                 flags |= PIN_NONBLOCK | PIN_NONFAULT;
1856
1857         /* Now pin it into the GTT as needed */
1858         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
1859         if (IS_ERR(vma)) {
1860                 struct i915_ggtt_view view;
1861                 unsigned int chunk_size;
1862
1863                 /* Use a partial view if it is bigger than available space */
1864                 chunk_size = MIN_CHUNK_PAGES;
1865                 if (i915_gem_object_is_tiled(obj))
1866                         chunk_size = roundup(chunk_size, tile_row_pages(obj));
1867
1868                 memset(&view, 0, sizeof(view));
1869                 view.type = I915_GGTT_VIEW_PARTIAL;
1870                 view.params.partial.offset = rounddown(page_offset, chunk_size);
1871                 view.params.partial.size =
1872                         min_t(unsigned int, chunk_size,
1873                               vma_pages(area) - view.params.partial.offset);
1874
1875                 /* If the partial covers the entire object, just create a
1876                  * normal VMA.
1877                  */
1878                 if (chunk_size >= obj->base.size >> PAGE_SHIFT)
1879                         view.type = I915_GGTT_VIEW_NORMAL;
1880
1881                 /* Userspace is now writing through an untracked VMA, abandon
1882                  * all hope that the hardware is able to track future writes.
1883                  */
1884                 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1885
1886                 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1887         }
1888         if (IS_ERR(vma)) {
1889                 ret = PTR_ERR(vma);
1890                 goto err_unlock;
1891         }
1892
1893         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1894         if (ret)
1895                 goto err_unpin;
1896
1897         ret = i915_vma_get_fence(vma);
1898         if (ret)
1899                 goto err_unpin;
1900
1901         /* Mark as being mmapped into userspace for later revocation */
1902         assert_rpm_wakelock_held(dev_priv);
1903         if (list_empty(&obj->userfault_link))
1904                 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
1905
1906         /* Finally, remap it using the new GTT offset */
1907         ret = remap_io_mapping(area,
1908                                area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
1909                                (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
1910                                min_t(u64, vma->size, area->vm_end - area->vm_start),
1911                                &ggtt->mappable);
1912
1913 err_unpin:
1914         __i915_vma_unpin(vma);
1915 err_unlock:
1916         mutex_unlock(&dev->struct_mutex);
1917 err_rpm:
1918         intel_runtime_pm_put(dev_priv);
1919         i915_gem_object_unpin_pages(obj);
1920 err:
1921         switch (ret) {
1922         case -EIO:
1923                 /*
1924                  * We eat errors when the gpu is terminally wedged to avoid
1925                  * userspace unduly crashing (gl has no provisions for mmaps to
1926                  * fail). But any other -EIO isn't ours (e.g. swap in failure)
1927                  * and so needs to be reported.
1928                  */
1929                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1930                         ret = VM_FAULT_SIGBUS;
1931                         break;
1932                 }
1933         case -EAGAIN:
1934                 /*
1935                  * EAGAIN means the gpu is hung and we'll wait for the error
1936                  * handler to reset everything when re-faulting in
1937                  * i915_mutex_lock_interruptible.
1938                  */
1939         case 0:
1940         case -ERESTARTSYS:
1941         case -EINTR:
1942         case -EBUSY:
1943                 /*
1944                  * EBUSY is ok: this just means that another thread
1945                  * already did the job.
1946                  */
1947                 ret = VM_FAULT_NOPAGE;
1948                 break;
1949         case -ENOMEM:
1950                 ret = VM_FAULT_OOM;
1951                 break;
1952         case -ENOSPC:
1953         case -EFAULT:
1954                 ret = VM_FAULT_SIGBUS;
1955                 break;
1956         default:
1957                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1958                 ret = VM_FAULT_SIGBUS;
1959                 break;
1960         }
1961         return ret;
1962 }
1963
1964 /**
1965  * i915_gem_release_mmap - remove physical page mappings
1966  * @obj: obj in question
1967  *
1968  * Preserve the reservation of the mmapping with the DRM core code, but
1969  * relinquish ownership of the pages back to the system.
1970  *
1971  * It is vital that we remove the page mapping if we have mapped a tiled
1972  * object through the GTT and then lose the fence register due to
1973  * resource pressure. Similarly if the object has been moved out of the
1974  * aperture, then pages mapped into userspace must be revoked. Removing the
1975  * mapping will then trigger a page fault on the next user access, allowing
1976  * fixup by i915_gem_fault().
1977  */
1978 void
1979 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1980 {
1981         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1982
1983         /* Serialisation between user GTT access and our code depends upon
1984          * revoking the CPU's PTE whilst the mutex is held. The next user
1985          * pagefault then has to wait until we release the mutex.
1986          *
1987          * Note that RPM complicates this somewhat by adding an additional
1988          * requirement that operations to the GGTT be made holding the RPM
1989          * wakeref.
1990          */
1991         lockdep_assert_held(&i915->drm.struct_mutex);
1992         intel_runtime_pm_get(i915);
1993
1994         if (list_empty(&obj->userfault_link))
1995                 goto out;
1996
1997         list_del_init(&obj->userfault_link);
1998         drm_vma_node_unmap(&obj->base.vma_node,
1999                            obj->base.dev->anon_inode->i_mapping);
2000
2001         /* Ensure that the CPU's PTEs are revoked and there are no outstanding
2002          * memory transactions from userspace before we return. The TLB
2003          * flushing implied by changing the PTEs above *should* be
2004          * sufficient; an extra barrier here just provides us with a bit
2005          * of paranoid documentation about our requirement to serialise
2006          * memory writes before touching registers / GSM.
2007          */
2008         wmb();
2009
2010 out:
2011         intel_runtime_pm_put(i915);
2012 }
2013
2014 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
2015 {
2016         struct drm_i915_gem_object *obj, *on;
2017         int i;
2018
2019         /*
2020          * Only called during RPM suspend. All users of the userfault_list
2021          * must be holding an RPM wakeref to ensure that this can not
2022          * run concurrently with themselves (and use the struct_mutex for
2023          * protection between themselves).
2024          */
2025
2026         list_for_each_entry_safe(obj, on,
2027                                  &dev_priv->mm.userfault_list, userfault_link) {
2028                 list_del_init(&obj->userfault_link);
2029                 drm_vma_node_unmap(&obj->base.vma_node,
2030                                    obj->base.dev->anon_inode->i_mapping);
2031         }
2032
2033         /* The fences will be lost when the device powers down. If any were
2034          * in use by hardware (i.e. they are pinned), we should not be powering
2035          * down! All other fences will be reacquired by the user upon waking.
2036          */
2037         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2038                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2039
2040                 if (WARN_ON(reg->pin_count))
2041                         continue;
2042
2043                 if (!reg->vma)
2044                         continue;
2045
2046                 GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
2047                 reg->dirty = true;
2048         }
2049 }
2050
2051 /**
2052  * i915_gem_get_ggtt_size - return required global GTT size for an object
2053  * @dev_priv: i915 device
2054  * @size: object size
2055  * @tiling_mode: tiling mode
2056  *
2057  * Return the required global GTT size for an object, taking into account
2058  * potential fence register mapping.
2059  */
2060 u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
2061                            u64 size, int tiling_mode)
2062 {
2063         u64 ggtt_size;
2064
2065         GEM_BUG_ON(size == 0);
2066
2067         if (INTEL_GEN(dev_priv) >= 4 ||
2068             tiling_mode == I915_TILING_NONE)
2069                 return size;
2070
2071         /* Previous chips need a power-of-two fence region when tiling */
2072         if (IS_GEN3(dev_priv))
2073                 ggtt_size = 1024*1024;
2074         else
2075                 ggtt_size = 512*1024;
2076
2077         while (ggtt_size < size)
2078                 ggtt_size <<= 1;
2079
2080         return ggtt_size;
2081 }
2082
2083 /**
2084  * i915_gem_get_ggtt_alignment - return required global GTT alignment
2085  * @dev_priv: i915 device
2086  * @size: object size
2087  * @tiling_mode: tiling mode
2088  * @fenced: is fenced alignment required or not
2089  *
2090  * Return the required global GTT alignment for an object, taking into account
2091  * potential fence register mapping.
2092  */
2093 u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
2094                                 int tiling_mode, bool fenced)
2095 {
2096         GEM_BUG_ON(size == 0);
2097
2098         /*
2099          * Minimum alignment is 4k (GTT page size), but might be greater
2100          * if a fence register is needed for the object.
2101          */
2102         if (INTEL_GEN(dev_priv) >= 4 ||
2103             (!fenced && (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))) ||
2104             tiling_mode == I915_TILING_NONE)
2105                 return 4096;
2106
2107         /*
2108          * Previous chips need to be aligned to the size of the smallest
2109          * fence register that can contain the object.
2110          */
2111         return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
2112 }
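/*
 * Worked example of the two helpers above (illustrative): a 300KiB X-tiled
 * object on GEN3 needs a power-of-two fence region, so the required GGTT
 * size is rounded up to the 1MiB GEN3 minimum and the fenced alignment
 * equals that 1MiB fence size; on GEN4+ the same object keeps its natural
 * size and only needs 4KiB alignment.
 */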
2113
2114 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2115 {
2116         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2117         int err;
2118
2119         err = drm_gem_create_mmap_offset(&obj->base);
2120         if (!err)
2121                 return 0;
2122
2123         /* We can idle the GPU locklessly to flush stale objects, but in order
2124          * to claim that space for ourselves, we need to take the big
2125          * struct_mutex to free the requests+objects and allocate our slot.
2126          */
2127         err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
2128         if (err)
2129                 return err;
2130
2131         err = i915_mutex_lock_interruptible(&dev_priv->drm);
2132         if (!err) {
2133                 i915_gem_retire_requests(dev_priv);
2134                 err = drm_gem_create_mmap_offset(&obj->base);
2135                 mutex_unlock(&dev_priv->drm.struct_mutex);
2136         }
2137
2138         return err;
2139 }
2140
2141 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2142 {
2143         drm_gem_free_mmap_offset(&obj->base);
2144 }
2145
2146 int
2147 i915_gem_mmap_gtt(struct drm_file *file,
2148                   struct drm_device *dev,
2149                   uint32_t handle,
2150                   uint64_t *offset)
2151 {
2152         struct drm_i915_gem_object *obj;
2153         int ret;
2154
2155         obj = i915_gem_object_lookup(file, handle);
2156         if (!obj)
2157                 return -ENOENT;
2158
2159         ret = i915_gem_object_create_mmap_offset(obj);
2160         if (ret == 0)
2161                 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2162
2163         i915_gem_object_put(obj);
2164         return ret;
2165 }
2166
2167 /**
2168  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2169  * @dev: DRM device
2170  * @data: GTT mapping ioctl data
2171  * @file: GEM object info
2172  *
2173  * Simply returns the fake offset to userspace so it can mmap it.
2174  * The mmap call will end up in drm_gem_mmap(), which will set things
2175  * up so we can get faults in the handler above.
2176  *
2177  * The fault handler will take care of binding the object into the GTT
2178  * (since it may have been evicted to make room for something), allocating
2179  * a fence register, and mapping the appropriate aperture address into
2180  * userspace.
2181  */
2182 int
2183 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2184                         struct drm_file *file)
2185 {
2186         struct drm_i915_gem_mmap_gtt *args = data;
2187
2188         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2189 }
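/*
 * A minimal userspace sketch of the GTT mmap flow, assuming "fd", "handle"
 * and "obj_size" (hypothetical) as before: fetch the fake offset with the
 * ioctl above, then mmap the DRM fd at that offset so faults are serviced
 * by i915_gem_fault().
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *		ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 */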
2190
2191 /* Immediately discard the backing storage */
2192 static void
2193 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2194 {
2195         i915_gem_object_free_mmap_offset(obj);
2196
2197         if (obj->base.filp == NULL)
2198                 return;
2199
2200         /* Our goal here is to return as much of the memory as
2201          * possible back to the system, as we are called from OOM.
2202          * To do this we must instruct the shmfs to drop all of its
2203          * backing pages, *now*.
2204          */
2205         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2206         obj->mm.madv = __I915_MADV_PURGED;
2207 }
2208
2209 /* Try to discard unwanted pages */
2210 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2211 {
2212         struct address_space *mapping;
2213
2214         lockdep_assert_held(&obj->mm.lock);
2215         GEM_BUG_ON(obj->mm.pages);
2216
2217         switch (obj->mm.madv) {
2218         case I915_MADV_DONTNEED:
2219                 i915_gem_object_truncate(obj);
2220         case __I915_MADV_PURGED:
2221                 return;
2222         }
2223
2224         if (obj->base.filp == NULL)
2225                 return;
2226
2227         mapping = obj->base.filp->f_mapping;
2228         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2229 }
2230
2231 static void
2232 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2233                               struct sg_table *pages)
2234 {
2235         struct sgt_iter sgt_iter;
2236         struct page *page;
2237
2238         __i915_gem_object_release_shmem(obj, pages);
2239
2240         i915_gem_gtt_finish_pages(obj, pages);
2241
2242         if (i915_gem_object_needs_bit17_swizzle(obj))
2243                 i915_gem_object_save_bit_17_swizzle(obj, pages);
2244
2245         for_each_sgt_page(page, sgt_iter, pages) {
2246                 if (obj->mm.dirty)
2247                         set_page_dirty(page);
2248
2249                 if (obj->mm.madv == I915_MADV_WILLNEED)
2250                         mark_page_accessed(page);
2251
2252                 put_page(page);
2253         }
2254         obj->mm.dirty = false;
2255
2256         sg_free_table(pages);
2257         kfree(pages);
2258 }
2259
2260 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
2261 {
2262         struct radix_tree_iter iter;
2263         void **slot;
2264
2265         radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
2266                 radix_tree_delete(&obj->mm.get_page.radix, iter.index);
2267 }
2268
2269 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
2270                                  enum i915_mm_subclass subclass)
2271 {
2272         struct sg_table *pages;
2273
2274         if (i915_gem_object_has_pinned_pages(obj))
2275                 return;
2276
2277         GEM_BUG_ON(obj->bind_count);
2278         if (!READ_ONCE(obj->mm.pages))
2279                 return;
2280
2281         /* May be called by shrinker from within get_pages() (on another bo) */
2282         mutex_lock_nested(&obj->mm.lock, subclass);
2283         if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
2284                 goto unlock;
2285
2286         /* ->put_pages might need to allocate memory for the bit17 swizzle
2287          * array, hence protect them from being reaped by removing them from gtt
2288          * lists early. */
2289         pages = fetch_and_zero(&obj->mm.pages);
2290         GEM_BUG_ON(!pages);
2291
2292         if (obj->mm.mapping) {
2293                 void *ptr;
2294
2295                 ptr = ptr_mask_bits(obj->mm.mapping);
2296                 if (is_vmalloc_addr(ptr))
2297                         vunmap(ptr);
2298                 else
2299                         kunmap(kmap_to_page(ptr));
2300
2301                 obj->mm.mapping = NULL;
2302         }
2303
2304         __i915_gem_object_reset_page_iter(obj);
2305
2306         obj->ops->put_pages(obj, pages);
2307 unlock:
2308         mutex_unlock(&obj->mm.lock);
2309 }
2310
2311 static unsigned int swiotlb_max_size(void)
2312 {
2313 #if IS_ENABLED(CONFIG_SWIOTLB)
2314         return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
2315 #else
2316         return 0;
2317 #endif
2318 }
2319
2320 static void i915_sg_trim(struct sg_table *orig_st)
2321 {
2322         struct sg_table new_st;
2323         struct scatterlist *sg, *new_sg;
2324         unsigned int i;
2325
2326         if (orig_st->nents == orig_st->orig_nents)
2327                 return;
2328
2329         if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
2330                 return;
2331
2332         new_sg = new_st.sgl;
2333         for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
2334                 sg_set_page(new_sg, sg_page(sg), sg->length, 0);
2335                 /* called before being DMA mapped, no need to copy sg->dma_* */
2336                 new_sg = sg_next(new_sg);
2337         }
2338
2339         sg_free_table(orig_st);
2340
2341         *orig_st = new_st;
2342 }
2343
2344 static struct sg_table *
2345 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2346 {
2347         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2348         int page_count, i;
2349         struct address_space *mapping;
2350         struct sg_table *st;
2351         struct scatterlist *sg;
2352         struct sgt_iter sgt_iter;
2353         struct page *page;
2354         unsigned long last_pfn = 0;     /* suppress gcc warning */
2355         unsigned int max_segment;
2356         int ret;
2357         gfp_t gfp;
2358
2359         /* Assert that the object is not currently in any GPU domain. As it
2360          * wasn't in the GTT, there shouldn't be any way it could have been in
2361          * a GPU cache
2362          */
2363         GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2364         GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2365
2366         max_segment = swiotlb_max_size();
2367         if (!max_segment)
2368                 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
2369
2370         st = kmalloc(sizeof(*st), GFP_KERNEL);
2371         if (st == NULL)
2372                 return ERR_PTR(-ENOMEM);
2373
2374         page_count = obj->base.size / PAGE_SIZE;
2375         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2376                 kfree(st);
2377                 return ERR_PTR(-ENOMEM);
2378         }
2379
2380         /* Get the list of pages out of our struct file.  They'll be pinned
2381          * at this point until we release them.
2382          *
2383          * Fail silently without starting the shrinker
2384          */
2385         mapping = obj->base.filp->f_mapping;
2386         gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2387         gfp |= __GFP_NORETRY | __GFP_NOWARN;
2388         sg = st->sgl;
2389         st->nents = 0;
2390         for (i = 0; i < page_count; i++) {
2391                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2392                 if (IS_ERR(page)) {
2393                         i915_gem_shrink(dev_priv,
2394                                         page_count,
2395                                         I915_SHRINK_BOUND |
2396                                         I915_SHRINK_UNBOUND |
2397                                         I915_SHRINK_PURGEABLE);
2398                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2399                 }
2400                 if (IS_ERR(page)) {
2401                         /* We've tried hard to allocate the memory by reaping
2402                          * our own buffer, now let the real VM do its job and
2403                          * go down in flames if truly OOM.
2404                          */
2405                         page = shmem_read_mapping_page(mapping, i);
2406                         if (IS_ERR(page)) {
2407                                 ret = PTR_ERR(page);
2408                                 goto err_sg;
2409                         }
2410                 }
2411                 if (!i ||
2412                     sg->length >= max_segment ||
2413                     page_to_pfn(page) != last_pfn + 1) {
2414                         if (i)
2415                                 sg = sg_next(sg);
2416                         st->nents++;
2417                         sg_set_page(sg, page, PAGE_SIZE, 0);
2418                 } else {
2419                         sg->length += PAGE_SIZE;
2420                 }
2421                 last_pfn = page_to_pfn(page);
2422
2423                 /* Check that the i965g/gm workaround works. */
2424                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2425         }
2426         if (sg) /* loop terminated early; short sg table */
2427                 sg_mark_end(sg);
2428
2429         /* Trim unused sg entries to avoid wasting memory. */
2430         i915_sg_trim(st);
2431
2432         ret = i915_gem_gtt_prepare_pages(obj, st);
2433         if (ret)
2434                 goto err_pages;
2435
2436         if (i915_gem_object_needs_bit17_swizzle(obj))
2437                 i915_gem_object_do_bit_17_swizzle(obj, st);
2438
2439         return st;
2440
2441 err_sg:
2442         sg_mark_end(sg);
2443 err_pages:
2444         for_each_sgt_page(page, sgt_iter, st)
2445                 put_page(page);
2446         sg_free_table(st);
2447         kfree(st);
2448
2449         /* shmemfs first checks if there is enough memory to allocate the page
2450          * and reports ENOSPC should there be insufficient, along with the usual
2451          * ENOMEM for a genuine allocation failure.
2452          *
2453          * We use ENOSPC in our driver to mean that we have run out of aperture
2454          * space and so want to translate the error from shmemfs back to our
2455          * usual understanding of ENOMEM.
2456          */
2457         if (ret == -ENOSPC)
2458                 ret = -ENOMEM;
2459
2460         return ERR_PTR(ret);
2461 }
2462
2463 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
2464                                  struct sg_table *pages)
2465 {
2466         lockdep_assert_held(&obj->mm.lock);
2467
2468         obj->mm.get_page.sg_pos = pages->sgl;
2469         obj->mm.get_page.sg_idx = 0;
2470
2471         obj->mm.pages = pages;
2472
2473         if (i915_gem_object_is_tiled(obj) &&
2474             to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
2475                 GEM_BUG_ON(obj->mm.quirked);
2476                 __i915_gem_object_pin_pages(obj);
2477                 obj->mm.quirked = true;
2478         }
2479 }
2480
2481 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2482 {
2483         struct sg_table *pages;
2484
2485         GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2486
2487         if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
2488                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2489                 return -EFAULT;
2490         }
2491
2492         pages = obj->ops->get_pages(obj);
2493         if (unlikely(IS_ERR(pages)))
2494                 return PTR_ERR(pages);
2495
2496         __i915_gem_object_set_pages(obj, pages);
2497         return 0;
2498 }
2499
2500 /* Ensure that the associated pages are gathered from the backing storage
2501  * and pinned into our object. i915_gem_object_pin_pages() may be called
2502  * multiple times before they are released by a single call to
2503  * i915_gem_object_unpin_pages() - once the pages are no longer referenced
2504  * either as a result of memory pressure (reaping pages under the shrinker)
2505  * or as the object is itself released.
2506  */
2507 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2508 {
2509         int err;
2510
2511         err = mutex_lock_interruptible(&obj->mm.lock);
2512         if (err)
2513                 return err;
2514
2515         if (unlikely(!obj->mm.pages)) {
2516                 err = ____i915_gem_object_get_pages(obj);
2517                 if (err)
2518                         goto unlock;
2519
2520                 smp_mb__before_atomic();
2521         }
2522         atomic_inc(&obj->mm.pages_pin_count);
2523
2524 unlock:
2525         mutex_unlock(&obj->mm.lock);
2526         return err;
2527 }
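/*
 * Typical in-kernel pairing (a sketch, not taken from this file): callers
 * pin the pages for the duration of their access and drop the reference
 * afterwards via i915_gem_object_unpin_pages().
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *	... access the backing store ...
 *	i915_gem_object_unpin_pages(obj);
 */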
2528
2529 /* The 'mapping' part of i915_gem_object_pin_map() below */
2530 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2531                                  enum i915_map_type type)
2532 {
2533         unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2534         struct sg_table *sgt = obj->mm.pages;
2535         struct sgt_iter sgt_iter;
2536         struct page *page;
2537         struct page *stack_pages[32];
2538         struct page **pages = stack_pages;
2539         unsigned long i = 0;
2540         pgprot_t pgprot;
2541         void *addr;
2542
2543         /* A single page can always be kmapped */
2544         if (n_pages == 1 && type == I915_MAP_WB)
2545                 return kmap(sg_page(sgt->sgl));
2546
2547         if (n_pages > ARRAY_SIZE(stack_pages)) {
2548                 /* Too big for stack -- allocate temporary array instead */
2549                 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2550                 if (!pages)
2551                         return NULL;
2552         }
2553
2554         for_each_sgt_page(page, sgt_iter, sgt)
2555                 pages[i++] = page;
2556
2557         /* Check that we have the expected number of pages */
2558         GEM_BUG_ON(i != n_pages);
2559
2560         switch (type) {
2561         case I915_MAP_WB:
2562                 pgprot = PAGE_KERNEL;
2563                 break;
2564         case I915_MAP_WC:
2565                 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2566                 break;
2567         }
2568         addr = vmap(pages, n_pages, 0, pgprot);
2569
2570         if (pages != stack_pages)
2571                 drm_free_large(pages);
2572
2573         return addr;
2574 }
2575
2576 /* get, pin, and map the pages of the object into kernel space */
2577 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2578                               enum i915_map_type type)
2579 {
2580         enum i915_map_type has_type;
2581         bool pinned;
2582         void *ptr;
2583         int ret;
2584
2585         GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
2586
2587         ret = mutex_lock_interruptible(&obj->mm.lock);
2588         if (ret)
2589                 return ERR_PTR(ret);
2590
2591         pinned = true;
2592         if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
2593                 if (unlikely(!obj->mm.pages)) {
2594                         ret = ____i915_gem_object_get_pages(obj);
2595                         if (ret)
2596                                 goto err_unlock;
2597
2598                         smp_mb__before_atomic();
2599                 }
2600                 atomic_inc(&obj->mm.pages_pin_count);
2601                 pinned = false;
2602         }
2603         GEM_BUG_ON(!obj->mm.pages);
2604
2605         ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
2606         if (ptr && has_type != type) {
2607                 if (pinned) {
2608                         ret = -EBUSY;
2609                         goto err_unpin;
2610                 }
2611
2612                 if (is_vmalloc_addr(ptr))
2613                         vunmap(ptr);
2614                 else
2615                         kunmap(kmap_to_page(ptr));
2616
2617                 ptr = obj->mm.mapping = NULL;
2618         }
2619
2620         if (!ptr) {
2621                 ptr = i915_gem_object_map(obj, type);
2622                 if (!ptr) {
2623                         ret = -ENOMEM;
2624                         goto err_unpin;
2625                 }
2626
2627                 obj->mm.mapping = ptr_pack_bits(ptr, type);
2628         }
2629
2630 out_unlock:
2631         mutex_unlock(&obj->mm.lock);
2632         return ptr;
2633
2634 err_unpin:
2635         atomic_dec(&obj->mm.pages_pin_count);
2636 err_unlock:
2637         ptr = ERR_PTR(ret);
2638         goto out_unlock;
2639 }
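/*
 * Typical in-kernel usage of pin_map (a sketch, not taken from this file):
 * map the whole object, write through the kernel address, then drop the
 * pin with i915_gem_object_unpin_map(); the cached vmapping itself is kept
 * until the pages are finally released.
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, obj->base.size);
 *	i915_gem_object_unpin_map(obj);
 */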
2640
2641 static bool i915_context_is_banned(const struct i915_gem_context *ctx)
2642 {
2643         if (ctx->banned)
2644                 return true;
2645
2646         if (!ctx->bannable)
2647                 return false;
2648
2649         if (ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD) {
2650                 DRM_DEBUG("context hanging too often, banning!\n");
2651                 return true;
2652         }
2653
2654         return false;
2655 }
2656
2657 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
2658 {
2659         ctx->ban_score += CONTEXT_SCORE_GUILTY;
2660
2661         ctx->banned = i915_context_is_banned(ctx);
2662         ctx->guilty_count++;
2663
2664         DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
2665                          ctx->name, ctx->ban_score,
2666                          yesno(ctx->banned));
2667
2668         if (!ctx->banned || IS_ERR_OR_NULL(ctx->file_priv))
2669                 return;
2670
2671         ctx->file_priv->context_bans++;
2672         DRM_DEBUG_DRIVER("client %s has had %d contexts banned\n",
2673                          ctx->name, ctx->file_priv->context_bans);
2674 }
2675
2676 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
2677 {
2678         ctx->active_count++;
2679 }
2680
2681 struct drm_i915_gem_request *
2682 i915_gem_find_active_request(struct intel_engine_cs *engine)
2683 {
2684         struct drm_i915_gem_request *request;
2685
2686         /* We are called by the error capture and reset at a random
2687          * point in time. In particular, note that neither is crucially
2688          * ordered with an interrupt. After a hang, the GPU is dead and we
2689          * assume that no more writes can happen (we waited long enough for
2690          * all writes that were in transaction to be flushed) - adding an
2691          * extra delay for a recent interrupt is pointless. Hence, we do
2692          * not need an engine->irq_seqno_barrier() before the seqno reads.
2693          */
2694         list_for_each_entry(request, &engine->timeline->requests, link) {
2695                 if (__i915_gem_request_completed(request))
2696                         continue;
2697
2698                 return request;
2699         }
2700
2701         return NULL;
2702 }
2703
2704 static void reset_request(struct drm_i915_gem_request *request)
2705 {
2706         void *vaddr = request->ring->vaddr;
2707         u32 head;
2708
2709         /* As this request likely depends on state from the lost
2710          * context, clear out all the user operations leaving the
2711          * breadcrumb at the end (so we get the fence notifications).
2712          */
2713         head = request->head;
2714         if (request->postfix < head) {
2715                 memset(vaddr + head, 0, request->ring->size - head);
2716                 head = 0;
2717         }
2718         memset(vaddr + head, 0, request->postfix - head);
2719 }
2720
2721 static void i915_gem_reset_engine(struct intel_engine_cs *engine)
2722 {
2723         struct drm_i915_gem_request *request;
2724         struct i915_gem_context *incomplete_ctx;
2725         struct intel_timeline *timeline;
2726         bool ring_hung;
2727
2728         if (engine->irq_seqno_barrier)
2729                 engine->irq_seqno_barrier(engine);
2730
2731         request = i915_gem_find_active_request(engine);
2732         if (!request)
2733                 return;
2734
2735         ring_hung = engine->hangcheck.stalled;
2736         if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
2737                 DRM_DEBUG_DRIVER("%s pardoned, was guilty? %s\n",
2738                                  engine->name,
2739                                  yesno(ring_hung));
2740                 ring_hung = false;
2741         }
2742
2743         if (ring_hung)
2744                 i915_gem_context_mark_guilty(request->ctx);
2745         else
2746                 i915_gem_context_mark_innocent(request->ctx);
2747
2748         if (!ring_hung)
2749                 return;
2750
2751         DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
2752                          engine->name, request->global_seqno);
2753
2754         /* Setup the CS to resume from the breadcrumb of the hung request */
2755         engine->reset_hw(engine, request);
2756
2757         /* Users of the default context do not rely on logical state
2758          * preserved between batches. They have to emit full state on
2759          * every batch and so it is safe to execute queued requests following
2760          * the hang.
2761          *
2762          * Other contexts preserve state, now corrupt. We want to skip all
2763          * queued requests that reference the corrupt context.
2764          */
2765         incomplete_ctx = request->ctx;
2766         if (i915_gem_context_is_default(incomplete_ctx))
2767                 return;
2768
2769         list_for_each_entry_continue(request, &engine->timeline->requests, link)
2770                 if (request->ctx == incomplete_ctx)
2771                         reset_request(request);
2772
2773         timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
2774         list_for_each_entry(request, &timeline->requests, link)
2775                 reset_request(request);
2776 }
2777
2778 void i915_gem_reset(struct drm_i915_private *dev_priv)
2779 {
2780         struct intel_engine_cs *engine;
2781         enum intel_engine_id id;
2782
2783         lockdep_assert_held(&dev_priv->drm.struct_mutex);
2784
2785         i915_gem_retire_requests(dev_priv);
2786
2787         for_each_engine(engine, dev_priv, id)
2788                 i915_gem_reset_engine(engine);
2789
2790         i915_gem_restore_fences(dev_priv);
2791
2792         if (dev_priv->gt.awake) {
2793                 intel_sanitize_gt_powersave(dev_priv);
2794                 intel_enable_gt_powersave(dev_priv);
2795                 if (INTEL_GEN(dev_priv) >= 6)
2796                         gen6_rps_busy(dev_priv);
2797         }
2798 }
2799
2800 static void nop_submit_request(struct drm_i915_gem_request *request)
2801 {
2802         i915_gem_request_submit(request);
2803         intel_engine_init_global_seqno(request->engine, request->global_seqno);
2804 }
2805
2806 static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
2807 {
2808         /* We need to be sure that no thread is running the old callback as
2809          * we install the nop handler (otherwise we would submit a request
2810          * to hardware that will never complete). In order to prevent this
2811          * race, we wait until the machine is idle before making the swap
2812          * (using stop_machine()).
2813          */
2814         engine->submit_request = nop_submit_request;
2815
2816         /* Mark all pending requests as complete so that any concurrent
2817          * (lockless) lookup doesn't try and wait upon the request as we
2818          * reset it.
2819          */
2820         intel_engine_init_global_seqno(engine,
2821                                        intel_engine_last_submit(engine));
2822
2823         /*
2824          * Clear the execlists queue up before freeing the requests, as those
2825          * are the ones that keep the context and ringbuffer backing objects
2826          * pinned in place.
2827          */
2828
2829         if (i915.enable_execlists) {
2830                 unsigned long flags;
2831
2832                 spin_lock_irqsave(&engine->timeline->lock, flags);
2833
2834                 i915_gem_request_put(engine->execlist_port[0].request);
2835                 i915_gem_request_put(engine->execlist_port[1].request);
2836                 memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
2837                 engine->execlist_queue = RB_ROOT;
2838                 engine->execlist_first = NULL;
2839
2840                 spin_unlock_irqrestore(&engine->timeline->lock, flags);
2841         }
2842 }
2843
2844 static int __i915_gem_set_wedged_BKL(void *data)
2845 {
2846         struct drm_i915_private *i915 = data;
2847         struct intel_engine_cs *engine;
2848         enum intel_engine_id id;
2849
2850         for_each_engine(engine, i915, id)
2851                 i915_gem_cleanup_engine(engine);
2852
2853         return 0;
2854 }
2855
2856 void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
2857 {
2858         lockdep_assert_held(&dev_priv->drm.struct_mutex);
2859         set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
2860
2861         stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
2862
2863         i915_gem_context_lost(dev_priv);
2864         i915_gem_retire_requests(dev_priv);
2865
2866         mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
2867 }
2868
2869 static void
2870 i915_gem_retire_work_handler(struct work_struct *work)
2871 {
2872         struct drm_i915_private *dev_priv =
2873                 container_of(work, typeof(*dev_priv), gt.retire_work.work);
2874         struct drm_device *dev = &dev_priv->drm;
2875
2876         /* Come back later if the device is busy... */
2877         if (mutex_trylock(&dev->struct_mutex)) {
2878                 i915_gem_retire_requests(dev_priv);
2879                 mutex_unlock(&dev->struct_mutex);
2880         }
2881
2882         /* Keep the retire handler running until we are finally idle.
2883          * We do not need to do this test under locking as in the worst-case
2884          * we queue the retire worker once too often.
2885          */
2886         if (READ_ONCE(dev_priv->gt.awake)) {
2887                 i915_queue_hangcheck(dev_priv);
2888                 queue_delayed_work(dev_priv->wq,
2889                                    &dev_priv->gt.retire_work,
2890                                    round_jiffies_up_relative(HZ));
2891         }
2892 }
2893
2894 static void
2895 i915_gem_idle_work_handler(struct work_struct *work)
2896 {
2897         struct drm_i915_private *dev_priv =
2898                 container_of(work, typeof(*dev_priv), gt.idle_work.work);
2899         struct drm_device *dev = &dev_priv->drm;
2900         struct intel_engine_cs *engine;
2901         enum intel_engine_id id;
2902         bool rearm_hangcheck;
2903
2904         if (!READ_ONCE(dev_priv->gt.awake))
2905                 return;
2906
2907         /*
2908          * Wait for last execlists context complete, but bail out in case a
2909          * new request is submitted.
2910          */
2911         wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
2912                  intel_execlists_idle(dev_priv), 10);
2913
2914         if (READ_ONCE(dev_priv->gt.active_requests))
2915                 return;
2916
2917         rearm_hangcheck =
2918                 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
2919
2920         if (!mutex_trylock(&dev->struct_mutex)) {
2921                 /* Currently busy, come back later */
2922                 mod_delayed_work(dev_priv->wq,
2923                                  &dev_priv->gt.idle_work,
2924                                  msecs_to_jiffies(50));
2925                 goto out_rearm;
2926         }
2927
2928         /*
2929          * New request retired after this work handler started, extend active
2930          * period until next instance of the work.
2931          */
2932         if (work_pending(work))
2933                 goto out_unlock;
2934
2935         if (dev_priv->gt.active_requests)
2936                 goto out_unlock;
2937
2938         if (wait_for(intel_execlists_idle(dev_priv), 10))
2939                 DRM_ERROR("Timeout waiting for engines to idle\n");
2940
2941         for_each_engine(engine, dev_priv, id)
2942                 i915_gem_batch_pool_fini(&engine->batch_pool);
2943
2944         GEM_BUG_ON(!dev_priv->gt.awake);
2945         dev_priv->gt.awake = false;
2946         rearm_hangcheck = false;
2947
2948         if (INTEL_GEN(dev_priv) >= 6)
2949                 gen6_rps_idle(dev_priv);
2950         intel_runtime_pm_put(dev_priv);
2951 out_unlock:
2952         mutex_unlock(&dev->struct_mutex);
2953
2954 out_rearm:
2955         if (rearm_hangcheck) {
2956                 GEM_BUG_ON(!dev_priv->gt.awake);
2957                 i915_queue_hangcheck(dev_priv);
2958         }
2959 }
2960
2961 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
2962 {
2963         struct drm_i915_gem_object *obj = to_intel_bo(gem);
2964         struct drm_i915_file_private *fpriv = file->driver_priv;
2965         struct i915_vma *vma, *vn;
2966
2967         mutex_lock(&obj->base.dev->struct_mutex);
2968         list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
2969                 if (vma->vm->file == fpriv)
2970                         i915_vma_close(vma);
2971
2972         if (i915_gem_object_is_active(obj) &&
2973             !i915_gem_object_has_active_reference(obj)) {
2974                 i915_gem_object_set_active_reference(obj);
2975                 i915_gem_object_get(obj);
2976         }
2977         mutex_unlock(&obj->base.dev->struct_mutex);
2978 }
2979
2980 static unsigned long to_wait_timeout(s64 timeout_ns)
2981 {
2982         if (timeout_ns < 0)
2983                 return MAX_SCHEDULE_TIMEOUT;
2984
2985         if (timeout_ns == 0)
2986                 return 0;
2987
2988         return nsecs_to_jiffies_timeout(timeout_ns);
2989 }
2990
2991 /**
2992  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2993  * @dev: drm device pointer
2994  * @data: ioctl data blob
2995  * @file: drm file pointer
2996  *
2997  * Returns 0 if successful, else an error is returned with the remaining time in
2998  * the timeout parameter.
2999  *  -ETIME: object is still busy after timeout
3000  *  -ERESTARTSYS: signal interrupted the wait
3001  *  -ENOENT: object doesn't exist
3002  * Also possible, but rare:
3003  *  -EAGAIN: GPU wedged
3004  *  -ENOMEM: out of memory
3005  *  -ENODEV: Internal IRQ fail
3006  *  -E?: The add request failed
3007  *
3008  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3009  * non-zero timeout parameter the wait ioctl will wait for the given number of
3010  * nanoseconds on an object becoming unbusy. Since the wait itself does so
3011  * without holding struct_mutex, the object may become re-busied before this
3012  * function completes. A similar but shorter race condition exists in the
3013  * busy ioctl.
3014  */
3015 int
3016 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3017 {
3018         struct drm_i915_gem_wait *args = data;
3019         struct drm_i915_gem_object *obj;
3020         ktime_t start;
3021         long ret;
3022
3023         if (args->flags != 0)
3024                 return -EINVAL;
3025
3026         obj = i915_gem_object_lookup(file, args->bo_handle);
3027         if (!obj)
3028                 return -ENOENT;
3029
3030         start = ktime_get();
3031
3032         ret = i915_gem_object_wait(obj,
3033                                    I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
3034                                    to_wait_timeout(args->timeout_ns),
3035                                    to_rps_client(file));
3036
3037         if (args->timeout_ns > 0) {
3038                 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
3039                 if (args->timeout_ns < 0)
3040                         args->timeout_ns = 0;
3041         }
3042
3043         i915_gem_object_put(obj);
3044         return ret;
3045 }
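
/*
 * A minimal userspace sketch of driving this ioctl (illustrative only;
 * assumes libdrm's drmIoctl() and the uapi definitions from i915_drm.h):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,		(GEM handle of the object)
 *		.timeout_ns = -1,		(negative means wait indefinitely)
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) && errno == ETIME)
 *		the object was still busy when the timeout expired
 *
 * With .timeout_ns = 0 the call degenerates into a non-blocking busy check,
 * as described in the comment above.
 */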
3046
3047 static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
3048 {
3049         int ret, i;
3050
3051         for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
3052                 ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
3053                 if (ret)
3054                         return ret;
3055         }
3056
3057         return 0;
3058 }
3059
3060 int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
3061 {
3062         int ret;
3063
3064         if (flags & I915_WAIT_LOCKED) {
3065                 struct i915_gem_timeline *tl;
3066
3067                 lockdep_assert_held(&i915->drm.struct_mutex);
3068
3069                 list_for_each_entry(tl, &i915->gt.timelines, link) {
3070                         ret = wait_for_timeline(tl, flags);
3071                         if (ret)
3072                                 return ret;
3073                 }
3074         } else {
3075                 ret = wait_for_timeline(&i915->gt.global_timeline, flags);
3076                 if (ret)
3077                         return ret;
3078         }
3079
3080         return 0;
3081 }
3082
3083 void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3084                              bool force)
3085 {
3086         /* If we don't have a page list set up, then we're not pinned
3087          * to GPU, and we can ignore the cache flush because it'll happen
3088          * again at bind time.
3089          */
3090         if (!obj->mm.pages)
3091                 return;
3092
3093         /*
3094          * Stolen memory is always coherent with the GPU as it is explicitly
3095          * marked as wc by the system, or the system is cache-coherent.
3096          */
3097         if (obj->stolen || obj->phys_handle)
3098                 return;
3099
3100         /* If the GPU is snooping the contents of the CPU cache,
3101          * we do not need to manually clear the CPU cache lines.  However,
3102          * the caches are only snooped when the render cache is
3103          * flushed/invalidated.  As we always have to emit invalidations
3104          * and flushes when moving into and out of the RENDER domain, correct
3105          * snooping behaviour occurs naturally as the result of our domain
3106          * tracking.
3107          */
3108         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3109                 obj->cache_dirty = true;
3110                 return;
3111         }
3112
3113         trace_i915_gem_object_clflush(obj);
3114         drm_clflush_sg(obj->mm.pages);
3115         obj->cache_dirty = false;
3116 }
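
/*
 * For example, a buffer filled by the CPU with cache_level == I915_CACHE_NONE
 * on a machine without an LLC is not coherent and must take the
 * drm_clflush_sg() path above before the GPU samples it; on LLC machines (or
 * for snooped buffers) the flush is skipped and only cache_dirty is noted,
 * relying on the domain tracking to emit the necessary invalidations.
 */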
3117
3118 /** Flushes the GTT write domain for the object if it's dirty. */
3119 static void
3120 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3121 {
3122         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3123
3124         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3125                 return;
3126
3127         /* No actual flushing is required for the GTT write domain.  Writes
3128          * to it "immediately" go to main memory as far as we know, so there's
3129          * no chipset flush.  It also doesn't land in render cache.
3130          *
3131          * However, we do have to enforce the order so that all writes through
3132          * the GTT land before any writes to the device, such as updates to
3133          * the GATT itself.
3134          *
3135          * We also have to wait a bit for the writes to land from the GTT.
3136          * An uncached read (i.e. mmio) seems to be ideal for the round-trip
3137          * timing. This issue has only been observed when switching quickly
3138          * between GTT writes and CPU reads from inside the kernel on recent hw,
3139          * and it appears to only affect discrete GTT blocks (i.e. on LLC
3140          * system agents we cannot reproduce this behaviour).
3141          */
3142         wmb();
3143         if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
3144                 POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
3145
3146         intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
3147
3148         obj->base.write_domain = 0;
3149         trace_i915_gem_object_change_domain(obj,
3150                                             obj->base.read_domains,
3151                                             I915_GEM_DOMAIN_GTT);
3152 }
3153
3154 /** Flushes the CPU write domain for the object if it's dirty. */
3155 static void
3156 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3157 {
3158         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3159                 return;
3160
3161         i915_gem_clflush_object(obj, obj->pin_display);
3162         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3163
3164         obj->base.write_domain = 0;
3165         trace_i915_gem_object_change_domain(obj,
3166                                             obj->base.read_domains,
3167                                             I915_GEM_DOMAIN_CPU);
3168 }
3169
3170 /**
3171  * Moves a single object to the GTT read, and possibly write domain.
3172  * @obj: object to act on
3173  * @write: ask for write access or read only
3174  *
3175  * This function returns when the move is complete, including waiting on
3176  * flushes to occur.
3177  */
3178 int
3179 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3180 {
3181         uint32_t old_write_domain, old_read_domains;
3182         int ret;
3183
3184         lockdep_assert_held(&obj->base.dev->struct_mutex);
3185
3186         ret = i915_gem_object_wait(obj,
3187                                    I915_WAIT_INTERRUPTIBLE |
3188                                    I915_WAIT_LOCKED |
3189                                    (write ? I915_WAIT_ALL : 0),
3190                                    MAX_SCHEDULE_TIMEOUT,
3191                                    NULL);
3192         if (ret)
3193                 return ret;
3194
3195         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3196                 return 0;
3197
3198         /* Flush and acquire obj->pages so that we are coherent through
3199          * direct access in memory with previous cached writes through
3200          * shmemfs and that our cache domain tracking remains valid.
3201          * For example, if the obj->filp was moved to swap without us
3202          * being notified and releasing the pages, we would mistakenly
3203          * continue to assume that the obj remained out of the CPU cached
3204          * domain.
3205          */
3206         ret = i915_gem_object_pin_pages(obj);
3207         if (ret)
3208                 return ret;
3209
3210         i915_gem_object_flush_cpu_write_domain(obj);
3211
3212         /* Serialise direct access to this object with the barriers for
3213          * coherent writes from the GPU, by effectively invalidating the
3214          * GTT domain upon first access.
3215          */
3216         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3217                 mb();
3218
3219         old_write_domain = obj->base.write_domain;
3220         old_read_domains = obj->base.read_domains;
3221
3222         /* It should now be out of any other write domains, and we can update
3223          * the domain values for our changes.
3224          */
3225         GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3226         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3227         if (write) {
3228                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3229                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3230                 obj->mm.dirty = true;
3231         }
3232
3233         trace_i915_gem_object_change_domain(obj,
3234                                             old_read_domains,
3235                                             old_write_domain);
3236
3237         i915_gem_object_unpin_pages(obj);
3238         return 0;
3239 }
3240
3241 /**
3242  * Changes the cache-level of an object across all VMA.
3243  * @obj: object to act on
3244  * @cache_level: new cache level to set for the object
3245  *
3246  * After this function returns, the object will be in the new cache-level
3247  * across all GTT and the contents of the backing storage will be coherent,
3248  * with respect to the new cache-level. In order to keep the backing storage
3249  * coherent for all users, we only allow a single cache level to be set
3250  * globally on the object and prevent it from being changed whilst the
3251  * hardware is reading from the object. That is if the object is currently
3252  * on the scanout it will be set to uncached (or equivalent display
3253  * cache coherency) and all non-MOCS GPU access will also be uncached so
3254  * that all direct access to the scanout remains coherent.
3255  */
3256 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3257                                     enum i915_cache_level cache_level)
3258 {
3259         struct i915_vma *vma;
3260         int ret;
3261
3262         lockdep_assert_held(&obj->base.dev->struct_mutex);
3263
3264         if (obj->cache_level == cache_level)
3265                 return 0;
3266
3267         /* Inspect the list of currently bound VMA and unbind any that would
3268          * be invalid given the new cache-level. This is principally to
3269          * catch the issue of the CS prefetch crossing page boundaries and
3270          * reading an invalid PTE on older architectures.
3271          */
3272 restart:
3273         list_for_each_entry(vma, &obj->vma_list, obj_link) {
3274                 if (!drm_mm_node_allocated(&vma->node))
3275                         continue;
3276
3277                 if (i915_vma_is_pinned(vma)) {
3278                         DRM_DEBUG("can not change the cache level of pinned objects\n");
3279                         return -EBUSY;
3280                 }
3281
3282                 if (i915_gem_valid_gtt_space(vma, cache_level))
3283                         continue;
3284
3285                 ret = i915_vma_unbind(vma);
3286                 if (ret)
3287                         return ret;
3288
3289                 /* As unbinding may affect other elements in the
3290                  * obj->vma_list (due to side-effects from retiring
3291                  * an active vma), play safe and restart the iterator.
3292                  */
3293                 goto restart;
3294         }
3295
3296         /* We can reuse the existing drm_mm nodes but need to change the
3297          * cache-level on the PTE. We could simply unbind them all and
3298          * rebind with the correct cache-level on next use. However since
3299  * we already have a valid slot, dma mapping, pages etc, we may as well
3300  * rewrite the PTE in the belief that doing so tramples upon less
3301          * state and so involves less work.
3302          */
3303         if (obj->bind_count) {
3304                 /* Before we change the PTE, the GPU must not be accessing it.
3305                  * If we wait upon the object, we know that all the bound
3306                  * VMA are no longer active.
3307                  */
3308                 ret = i915_gem_object_wait(obj,
3309                                            I915_WAIT_INTERRUPTIBLE |
3310                                            I915_WAIT_LOCKED |
3311                                            I915_WAIT_ALL,
3312                                            MAX_SCHEDULE_TIMEOUT,
3313                                            NULL);
3314                 if (ret)
3315                         return ret;
3316
3317                 if (!HAS_LLC(to_i915(obj->base.dev)) &&
3318                     cache_level != I915_CACHE_NONE) {
3319                         /* Access to snoopable pages through the GTT is
3320                          * incoherent and on some machines causes a hard
3321                          * lockup. Relinquish the CPU mmapping to force
3322                          * userspace to refault in the pages and we can
3323                          * then double check if the GTT mapping is still
3324                          * valid for that pointer access.
3325                          */
3326                         i915_gem_release_mmap(obj);
3327
3328                         /* As we no longer need a fence for GTT access,
3329                          * we can relinquish it now (and so prevent having
3330                          * to steal a fence from someone else on the next
3331                          * fence request). Note GPU activity would have
3332                          * dropped the fence as all snoopable access is
3333                          * supposed to be linear.
3334                          */
3335                         list_for_each_entry(vma, &obj->vma_list, obj_link) {
3336                                 ret = i915_vma_put_fence(vma);
3337                                 if (ret)
3338                                         return ret;
3339                         }
3340                 } else {
3341                         /* We either have incoherent backing store and
3342                          * so no GTT access or the architecture is fully
3343                          * coherent. In such cases, existing GTT mmaps
3344                          * ignore the cache bit in the PTE and we can
3345                          * rewrite it without confusing the GPU or having
3346                          * to force userspace to fault back in its mmaps.
3347                          */
3348                 }
3349
3350                 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3351                         if (!drm_mm_node_allocated(&vma->node))
3352                                 continue;
3353
3354                         ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3355                         if (ret)
3356                                 return ret;
3357                 }
3358         }
3359
3360         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
3361             cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3362                 obj->cache_dirty = true;
3363
3364         list_for_each_entry(vma, &obj->vma_list, obj_link)
3365                 vma->node.color = cache_level;
3366         obj->cache_level = cache_level;
3367
3368         return 0;
3369 }
3370
3371 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3372                                struct drm_file *file)
3373 {
3374         struct drm_i915_gem_caching *args = data;
3375         struct drm_i915_gem_object *obj;
3376         int err = 0;
3377
3378         rcu_read_lock();
3379         obj = i915_gem_object_lookup_rcu(file, args->handle);
3380         if (!obj) {
3381                 err = -ENOENT;
3382                 goto out;
3383         }
3384
3385         switch (obj->cache_level) {
3386         case I915_CACHE_LLC:
3387         case I915_CACHE_L3_LLC:
3388                 args->caching = I915_CACHING_CACHED;
3389                 break;
3390
3391         case I915_CACHE_WT:
3392                 args->caching = I915_CACHING_DISPLAY;
3393                 break;
3394
3395         default:
3396                 args->caching = I915_CACHING_NONE;
3397                 break;
3398         }
3399 out:
3400         rcu_read_unlock();
3401         return err;
3402 }
3403
3404 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3405                                struct drm_file *file)
3406 {
3407         struct drm_i915_private *i915 = to_i915(dev);
3408         struct drm_i915_gem_caching *args = data;
3409         struct drm_i915_gem_object *obj;
3410         enum i915_cache_level level;
3411         int ret;
3412
3413         switch (args->caching) {
3414         case I915_CACHING_NONE:
3415                 level = I915_CACHE_NONE;
3416                 break;
3417         case I915_CACHING_CACHED:
3418                 /*
3419                  * Due to a HW issue on BXT A stepping, GPU stores via a
3420                  * snooped mapping may leave stale data in a corresponding CPU
3421                  * cacheline, whereas normally such cachelines would get
3422                  * invalidated.
3423                  */
3424                 if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
3425                         return -ENODEV;
3426
3427                 level = I915_CACHE_LLC;
3428                 break;
3429         case I915_CACHING_DISPLAY:
3430                 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
3431                 break;
3432         default:
3433                 return -EINVAL;
3434         }
3435
3436         ret = i915_mutex_lock_interruptible(dev);
3437         if (ret)
3438                 return ret;
3439
3440         obj = i915_gem_object_lookup(file, args->handle);
3441         if (!obj) {
3442                 ret = -ENOENT;
3443                 goto unlock;
3444         }
3445
3446         ret = i915_gem_object_set_cache_level(obj, level);
3447         i915_gem_object_put(obj);
3448 unlock:
3449         mutex_unlock(&dev->struct_mutex);
3450         return ret;
3451 }
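
/*
 * A minimal userspace sketch of the two caching ioctls above (illustrative
 * only; assumes libdrm's drmIoctl() and the uapi definitions from i915_drm.h):
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_NONE,	(request an uncached object)
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg);
 *	(arg.caching now reports the level actually in effect)
 */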
3452
3453 /*
3454  * Prepare buffer for display plane (scanout, cursors, etc).
3455  * Can be called from an uninterruptible phase (modesetting) and allows
3456  * any flushes to be pipelined (for pageflips).
3457  */
3458 struct i915_vma *
3459 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3460                                      u32 alignment,
3461                                      const struct i915_ggtt_view *view)
3462 {
3463         struct i915_vma *vma;
3464         u32 old_read_domains, old_write_domain;
3465         int ret;
3466
3467         lockdep_assert_held(&obj->base.dev->struct_mutex);
3468
3469         /* Mark the pin_display early so that we account for the
3470          * display coherency whilst setting up the cache domains.
3471          */
3472         obj->pin_display++;
3473
3474         /* The display engine is not coherent with the LLC cache on gen6.  As
3475          * a result, we make sure that the pinning that is about to occur is
3476          * done with uncached PTEs. This is the lowest common denominator for all
3477          * chipsets.
3478          *
3479          * However for gen6+, we could do better by using the GFDT bit instead
3480          * of uncaching, which would allow us to flush all the LLC-cached data
3481          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3482          */
3483         ret = i915_gem_object_set_cache_level(obj,
3484                                               HAS_WT(to_i915(obj->base.dev)) ?
3485                                               I915_CACHE_WT : I915_CACHE_NONE);
3486         if (ret) {
3487                 vma = ERR_PTR(ret);
3488                 goto err_unpin_display;
3489         }
3490
3491         /* As the user may map the buffer once pinned in the display plane
3492          * (e.g. libkms for the bootup splash), we have to ensure that we
3493          * always use map_and_fenceable for all scanout buffers. However,
3494          * it may simply be too big to fit into mappable, in which case
3495          * put it anyway and hope that userspace can cope (but always first
3496          * try to preserve the existing ABI).
3497          */
3498         vma = ERR_PTR(-ENOSPC);
3499         if (view->type == I915_GGTT_VIEW_NORMAL)
3500                 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3501                                                PIN_MAPPABLE | PIN_NONBLOCK);
3502         if (IS_ERR(vma)) {
3503                 struct drm_i915_private *i915 = to_i915(obj->base.dev);
3504                 unsigned int flags;
3505
3506                 /* Valleyview is definitely limited to scanning out the first
3507                  * 512MiB. Let's presume this behaviour was inherited from the
3508                  * g4x display engine and that all earlier gen are similarly
3509                  * limited. Testing suggests that it is a little more
3510                  * complicated than this. For example, Cherryview appears quite
3511                  * happy to scan out from anywhere within its global aperture.
3512                  */
3513                 flags = 0;
3514                 if (HAS_GMCH_DISPLAY(i915))
3515                         flags = PIN_MAPPABLE;
3516                 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
3517         }
3518         if (IS_ERR(vma))
3519                 goto err_unpin_display;
3520
3521         vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3522
3523         /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
3524         if (obj->cache_dirty) {
3525                 i915_gem_clflush_object(obj, true);
3526                 intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
3527         }
3528
3529         old_write_domain = obj->base.write_domain;
3530         old_read_domains = obj->base.read_domains;
3531
3532         /* It should now be out of any other write domains, and we can update
3533          * the domain values for our changes.
3534          */
3535         obj->base.write_domain = 0;
3536         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3537
3538         trace_i915_gem_object_change_domain(obj,
3539                                             old_read_domains,
3540                                             old_write_domain);
3541
3542         return vma;
3543
3544 err_unpin_display:
3545         obj->pin_display--;
3546         return vma;
3547 }
3548
3549 void
3550 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
3551 {
3552         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
3553
3554         if (WARN_ON(vma->obj->pin_display == 0))
3555                 return;
3556
3557         if (--vma->obj->pin_display == 0)
3558                 vma->display_alignment = 0;
3559
3560         /* Bump the LRU to try and avoid premature eviction whilst flipping  */
3561         if (!i915_vma_is_active(vma))
3562                 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3563
3564         i915_vma_unpin(vma);
3565 }
3566
3567 /**
3568  * Moves a single object to the CPU read, and possibly write domain.
3569  * @obj: object to act on
3570  * @write: requesting write or read-only access
3571  *
3572  * This function returns when the move is complete, including waiting on
3573  * flushes to occur.
3574  */
3575 int
3576 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3577 {
3578         uint32_t old_write_domain, old_read_domains;
3579         int ret;
3580
3581         lockdep_assert_held(&obj->base.dev->struct_mutex);
3582
3583         ret = i915_gem_object_wait(obj,
3584                                    I915_WAIT_INTERRUPTIBLE |
3585                                    I915_WAIT_LOCKED |
3586                                    (write ? I915_WAIT_ALL : 0),
3587                                    MAX_SCHEDULE_TIMEOUT,
3588                                    NULL);
3589         if (ret)
3590                 return ret;
3591
3592         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3593                 return 0;
3594
3595         i915_gem_object_flush_gtt_write_domain(obj);
3596
3597         old_write_domain = obj->base.write_domain;
3598         old_read_domains = obj->base.read_domains;
3599
3600         /* Flush the CPU cache if it's still invalid. */
3601         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3602                 i915_gem_clflush_object(obj, false);
3603
3604                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3605         }
3606
3607         /* It should now be out of any other write domains, and we can update
3608          * the domain values for our changes.
3609          */
3610         GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3611
3612         /* If we're writing through the CPU, then the GPU read domains will
3613          * need to be invalidated at next use.
3614          */
3615         if (write) {
3616                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3617                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3618         }
3619
3620         trace_i915_gem_object_change_domain(obj,
3621                                             old_read_domains,
3622                                             old_write_domain);
3623
3624         return 0;
3625 }
3626
3627 /* Throttle our rendering by waiting until the ring has completed our requests
3628  * emitted over 20 msec ago.
3629  *
3630  * Note that if we were to use the current jiffies each time around the loop,
3631  * we wouldn't escape the function with any frames outstanding if the time to
3632  * render a frame was over 20ms.
3633  *
3634  * This should get us reasonable parallelism between CPU and GPU but also
3635  * relatively low latency when blocking on a particular request to finish.
3636  */
3637 static int
3638 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3639 {
3640         struct drm_i915_private *dev_priv = to_i915(dev);
3641         struct drm_i915_file_private *file_priv = file->driver_priv;
3642         unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
3643         struct drm_i915_gem_request *request, *target = NULL;
3644         long ret;
3645
3646         /* ABI: return -EIO if already wedged */
3647         if (i915_terminally_wedged(&dev_priv->gpu_error))
3648                 return -EIO;
3649
3650         spin_lock(&file_priv->mm.lock);
3651         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3652                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3653                         break;
3654
3655                 /*
3656                  * Note that the request might not have been submitted yet,
3657                  * in which case emitted_jiffies will be zero.
3658                  */
3659                 if (!request->emitted_jiffies)
3660                         continue;
3661
3662                 target = request;
3663         }
3664         if (target)
3665                 i915_gem_request_get(target);
3666         spin_unlock(&file_priv->mm.lock);
3667
3668         if (target == NULL)
3669                 return 0;
3670
3671         ret = i915_wait_request(target,
3672                                 I915_WAIT_INTERRUPTIBLE,
3673                                 MAX_SCHEDULE_TIMEOUT);
3674         i915_gem_request_put(target);
3675
3676         return ret < 0 ? ret : 0;
3677 }
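
/*
 * In other words, the loop above picks the most recent request emitted more
 * than DRM_I915_THROTTLE_JIFFIES (nominally 20ms, per the comment above) ago
 * and blocks until it completes, so a client can keep at most roughly 20ms of
 * rendering queued ahead of the GPU.
 */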
3678
3679 struct i915_vma *
3680 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3681                          const struct i915_ggtt_view *view,
3682                          u64 size,
3683                          u64 alignment,
3684                          u64 flags)
3685 {
3686         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3687         struct i915_address_space *vm = &dev_priv->ggtt.base;
3688         struct i915_vma *vma;
3689         int ret;
3690
3691         lockdep_assert_held(&obj->base.dev->struct_mutex);
3692
3693         vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
3694         if (IS_ERR(vma))
3695                 return vma;
3696
3697         if (i915_vma_misplaced(vma, size, alignment, flags)) {
3698                 if (flags & PIN_NONBLOCK &&
3699                     (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
3700                         return ERR_PTR(-ENOSPC);
3701
3702                 if (flags & PIN_MAPPABLE) {
3703                         u32 fence_size;
3704
3705                         fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
3706                                                             i915_gem_object_get_tiling(obj));
3707                         /* If the required space is larger than the available
3708                          * aperture, we will not be able to find a slot for the
3709                          * object and unbinding the object now will be in
3710                          * vain. Worse, doing so may cause us to ping-pong
3711                          * the object in and out of the Global GTT and
3712                          * waste a lot of cycles under the mutex.
3713                          */
3714                         if (fence_size > dev_priv->ggtt.mappable_end)
3715                                 return ERR_PTR(-E2BIG);
3716
3717                         /* If NONBLOCK is set the caller is optimistically
3718                          * trying to cache the full object within the mappable
3719                          * aperture, and *must* have a fallback in place for
3720                          * situations where we cannot bind the object. We
3721                          * can be a little more lax here and use the fallback
3722                          * more often to avoid costly migrations of ourselves
3723                          * and other objects within the aperture.
3724                          *
3725                          * Half the aperture is used as a simple heuristic.
3726                          * More interesting would be to search for a free
3727                          * block prior to making the commitment to unbind.
3728                          * That caters for the self-harm case, and with a
3729                          * little more heuristics (e.g. NOFAULT, NOEVICT)
3730                          * we could try to minimise harm to others.
3731                          */
3732                         if (flags & PIN_NONBLOCK &&
3733                             fence_size > dev_priv->ggtt.mappable_end / 2)
3734                                 return ERR_PTR(-ENOSPC);
3735                 }
3736
3737                 WARN(i915_vma_is_pinned(vma),
3738                      "bo is already pinned in ggtt with incorrect alignment:"
3739                      " offset=%08x, req.alignment=%llx,"
3740                      " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
3741                      i915_ggtt_offset(vma), alignment,
3742                      !!(flags & PIN_MAPPABLE),
3743                      i915_vma_is_map_and_fenceable(vma));
3744                 ret = i915_vma_unbind(vma);
3745                 if (ret)
3746                         return ERR_PTR(ret);
3747         }
3748
3749         ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
3750         if (ret)
3751                 return ERR_PTR(ret);
3752
3753         return vma;
3754 }
3755
3756 static __always_inline unsigned int __busy_read_flag(unsigned int id)
3757 {
3758         /* Note that we could alias engines in the execbuf API, but
3759          * that would be very unwise as it prevents userspace from having
3760          * fine control over engine selection. Ahem.
3761          *
3762          * This should be something like EXEC_MAX_ENGINE instead of
3763          * I915_NUM_ENGINES.
3764          */
3765         BUILD_BUG_ON(I915_NUM_ENGINES > 16);
3766         return 0x10000 << id;
3767 }
3768
3769 static __always_inline unsigned int __busy_write_id(unsigned int id)
3770 {
3771         /* The uABI guarantees an active writer is also amongst the read
3772          * engines. This would be true if we accessed the activity tracking
3773          * under the lock, but as we perform the lookup of the object and
3774          * its activity locklessly we can not guarantee that the last_write
3775          * being active implies that we have set the same engine flag from
3776          * last_read - hence we always set both read and write busy for
3777          * last_write.
3778          */
3779         return id | __busy_read_flag(id);
3780 }
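
/*
 * Worked example of the encoding above: for an engine with exec_id 2,
 * __busy_read_flag(2) == 0x10000 << 2 == 0x00040000 and
 * __busy_write_id(2) == 2 | 0x00040000 == 0x00040002. The reader mask thus
 * lives in the upper 16 bits (one bit per exec_id) while the writer's id
 * occupies the lower 16 bits, and a writer is always reported as a reader too.
 */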
3781
3782 static __always_inline unsigned int
3783 __busy_set_if_active(const struct dma_fence *fence,
3784                      unsigned int (*flag)(unsigned int id))
3785 {
3786         struct drm_i915_gem_request *rq;
3787
3788         /* We have to check the current hw status of the fence as the uABI
3789          * guarantees forward progress. We could rely on the idle worker
3790          * to eventually flush us, but to minimise latency just ask the
3791          * hardware.
3792          *
3793          * Note we only report on the status of native fences.
3794          */
3795         if (!dma_fence_is_i915(fence))
3796                 return 0;
3797
3798         /* opencode to_request() in order to avoid const warnings */
3799         rq = container_of(fence, struct drm_i915_gem_request, fence);
3800         if (i915_gem_request_completed(rq))
3801                 return 0;
3802
3803         return flag(rq->engine->exec_id);
3804 }
3805
3806 static __always_inline unsigned int
3807 busy_check_reader(const struct dma_fence *fence)
3808 {
3809         return __busy_set_if_active(fence, __busy_read_flag);
3810 }
3811
3812 static __always_inline unsigned int
3813 busy_check_writer(const struct dma_fence *fence)
3814 {
3815         if (!fence)
3816                 return 0;
3817
3818         return __busy_set_if_active(fence, __busy_write_id);
3819 }
3820
3821 int
3822 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3823                     struct drm_file *file)
3824 {
3825         struct drm_i915_gem_busy *args = data;
3826         struct drm_i915_gem_object *obj;
3827         struct reservation_object_list *list;
3828         unsigned int seq;
3829         int err;
3830
3831         err = -ENOENT;
3832         rcu_read_lock();
3833         obj = i915_gem_object_lookup_rcu(file, args->handle);
3834         if (!obj)
3835                 goto out;
3836
3837         /* A discrepancy here is that we do not report the status of
3838          * non-i915 fences, i.e. even though we may report the object as idle,
3839          * a call to set-domain may still stall waiting for foreign rendering.
3840          * This also means that wait-ioctl may report an object as busy,
3841          * where busy-ioctl considers it idle.
3842          *
3843          * We trade the ability to warn of foreign fences for the ability to
3844          * report which i915 engines are active for the object.
3845          *
3846          * Alternatively, we can trade that extra information on read/write
3847          * activity with
3848          *      args->busy =
3849          *              !reservation_object_test_signaled_rcu(obj->resv, true);
3850          * to report the overall busyness. This is what the wait-ioctl does.
3851          *
3852          */
3853 retry:
3854         seq = raw_read_seqcount(&obj->resv->seq);
3855
3856         /* Translate the exclusive fence to the READ *and* WRITE engine */
3857         args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
3858
3859         /* Translate shared fences to READ set of engines */
3860         list = rcu_dereference(obj->resv->fence);
3861         if (list) {
3862                 unsigned int shared_count = list->shared_count, i;
3863
3864                 for (i = 0; i < shared_count; ++i) {
3865                         struct dma_fence *fence =
3866                                 rcu_dereference(list->shared[i]);
3867
3868                         args->busy |= busy_check_reader(fence);
3869                 }
3870         }
3871
3872         if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
3873                 goto retry;
3874
3875         err = 0;
3876 out:
3877         rcu_read_unlock();
3878         return err;
3879 }
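
/*
 * A userspace consumer would decode args->busy roughly as follows
 * (illustrative only, following the flag helpers above):
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *	if (!busy.busy)
 *		the object is idle (modulo foreign fences, see above)
 *	else
 *		busy.busy >> 16 is the mask of engines reading the object and
 *		busy.busy & 0xffff identifies the engine writing to it
 */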
3880
3881 int
3882 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3883                         struct drm_file *file_priv)
3884 {
3885         return i915_gem_ring_throttle(dev, file_priv);
3886 }
3887
3888 int
3889 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3890                        struct drm_file *file_priv)
3891 {
3892         struct drm_i915_private *dev_priv = to_i915(dev);
3893         struct drm_i915_gem_madvise *args = data;
3894         struct drm_i915_gem_object *obj;
3895         int err;
3896
3897         switch (args->madv) {
3898         case I915_MADV_DONTNEED:
3899         case I915_MADV_WILLNEED:
3900             break;
3901         default:
3902             return -EINVAL;
3903         }
3904
3905         obj = i915_gem_object_lookup(file_priv, args->handle);
3906         if (!obj)
3907                 return -ENOENT;
3908
3909         err = mutex_lock_interruptible(&obj->mm.lock);
3910         if (err)
3911                 goto out;
3912
3913         if (obj->mm.pages &&
3914             i915_gem_object_is_tiled(obj) &&
3915             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
3916                 if (obj->mm.madv == I915_MADV_WILLNEED) {
3917                         GEM_BUG_ON(!obj->mm.quirked);
3918                         __i915_gem_object_unpin_pages(obj);
3919                         obj->mm.quirked = false;
3920                 }
3921                 if (args->madv == I915_MADV_WILLNEED) {
3922                         GEM_BUG_ON(obj->mm.quirked);
3923                         __i915_gem_object_pin_pages(obj);
3924                         obj->mm.quirked = true;
3925                 }
3926         }
3927
3928         if (obj->mm.madv != __I915_MADV_PURGED)
3929                 obj->mm.madv = args->madv;
3930
3931         /* if the object is no longer attached, discard its backing storage */
3932         if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
3933                 i915_gem_object_truncate(obj);
3934
3935         args->retained = obj->mm.madv != __I915_MADV_PURGED;
3936         mutex_unlock(&obj->mm.lock);
3937
3938 out:
3939         i915_gem_object_put(obj);
3940         return err;
3941 }
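
/*
 * A minimal userspace sketch of purgeable-buffer management through this
 * ioctl (illustrative only; assumes the uapi definitions from i915_drm.h):
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,	(contents may be discarded)
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	...
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		the backing storage was purged and the contents must be
 *		regenerated before the buffer is used again
 */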
3942
3943 static void
3944 frontbuffer_retire(struct i915_gem_active *active,
3945                    struct drm_i915_gem_request *request)
3946 {
3947         struct drm_i915_gem_object *obj =
3948                 container_of(active, typeof(*obj), frontbuffer_write);
3949
3950         intel_fb_obj_flush(obj, true, ORIGIN_CS);
3951 }
3952
3953 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3954                           const struct drm_i915_gem_object_ops *ops)
3955 {
3956         mutex_init(&obj->mm.lock);
3957
3958         INIT_LIST_HEAD(&obj->global_link);
3959         INIT_LIST_HEAD(&obj->userfault_link);
3960         INIT_LIST_HEAD(&obj->obj_exec_link);
3961         INIT_LIST_HEAD(&obj->vma_list);
3962         INIT_LIST_HEAD(&obj->batch_pool_link);
3963
3964         obj->ops = ops;
3965
3966         reservation_object_init(&obj->__builtin_resv);
3967         obj->resv = &obj->__builtin_resv;
3968
3969         obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
3970         init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
3971
3972         obj->mm.madv = I915_MADV_WILLNEED;
3973         INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
3974         mutex_init(&obj->mm.get_page.lock);
3975
3976         i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
3977 }
3978
3979 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3980         .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
3981                  I915_GEM_OBJECT_IS_SHRINKABLE,
3982         .get_pages = i915_gem_object_get_pages_gtt,
3983         .put_pages = i915_gem_object_put_pages_gtt,
3984 };
3985
3986 /* Note we don't consider sign bits :| */
3987 #define overflows_type(x, T) \
3988         (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
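
/*
 * For example, on a 32-bit kernel where obj->base.size is a 32-bit size_t,
 * a u64 size of 8GiB would otherwise be silently truncated on assignment;
 * overflows_type() catches that case up front (at the cost of ignoring
 * sign bits, as noted).
 */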
3989
3990 struct drm_i915_gem_object *
3991 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
3992 {
3993         struct drm_i915_gem_object *obj;
3994         struct address_space *mapping;
3995         gfp_t mask;
3996         int ret;
3997
3998         /* There is a prevalence of the assumption that we fit the object's
3999          * page count inside a 32bit _signed_ variable. Let's document this and
4000          * catch if we ever need to fix it. In the meantime, if you do spot
4001          * such a local variable, please consider fixing!
4002          */
4003         if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
4004                 return ERR_PTR(-E2BIG);
4005
4006         if (overflows_type(size, obj->base.size))
4007                 return ERR_PTR(-E2BIG);
4008
4009         obj = i915_gem_object_alloc(dev_priv);
4010         if (obj == NULL)
4011                 return ERR_PTR(-ENOMEM);
4012
4013         ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size);
4014         if (ret)
4015                 goto fail;
4016
4017         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4018         if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
4019                 /* 965gm cannot relocate objects above 4GiB. */
4020                 mask &= ~__GFP_HIGHMEM;
4021                 mask |= __GFP_DMA32;
4022         }
4023
4024         mapping = obj->base.filp->f_mapping;
4025         mapping_set_gfp_mask(mapping, mask);
4026
4027         i915_gem_object_init(obj, &i915_gem_object_ops);
4028
4029         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4030         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4031
4032         if (HAS_LLC(dev_priv)) {
4033                 /* On some devices, we can have the GPU use the LLC (the CPU
4034                  * cache) for about a 10% performance improvement
4035                  * compared to uncached.  Graphics requests other than
4036                  * display scanout are coherent with the CPU in
4037                  * accessing this cache.  This means in this mode we
4038                  * don't need to clflush on the CPU side, and on the
4039                  * GPU side we only need to flush internal caches to
4040                  * get data visible to the CPU.
4041                  *
4042                  * However, we maintain the display planes as UC, and so
4043                  * need to rebind when first used as such.
4044                  */
4045                 obj->cache_level = I915_CACHE_LLC;
4046         } else
4047                 obj->cache_level = I915_CACHE_NONE;
4048
4049         trace_i915_gem_object_create(obj);
4050
4051         return obj;
4052
4053 fail:
4054         i915_gem_object_free(obj);
4055         return ERR_PTR(ret);
4056 }
4057
4058 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4059 {
4060         /* If we are the last user of the backing storage (be it shmemfs
4061          * pages or stolen etc), we know that the pages are going to be
4062          * immediately released. In this case, we can then skip copying
4063          * back the contents from the GPU.
4064          */
4065
4066         if (obj->mm.madv != I915_MADV_WILLNEED)
4067                 return false;
4068
4069         if (obj->base.filp == NULL)
4070                 return true;
4071
4072         /* At first glance, this looks racy, but then again so would be
4073          * userspace racing mmap against close. However, the first external
4074          * reference to the filp can only be obtained through the
4075          * i915_gem_mmap_ioctl() which safeguards us against the user
4076          * acquiring such a reference whilst we are in the middle of
4077          * freeing the object.
4078          */
4079         return atomic_long_read(&obj->base.filp->f_count) == 1;
4080 }
4081
4082 static void __i915_gem_free_objects(struct drm_i915_private *i915,
4083                                     struct llist_node *freed)
4084 {
4085         struct drm_i915_gem_object *obj, *on;
4086
4087         mutex_lock(&i915->drm.struct_mutex);
4088         intel_runtime_pm_get(i915);
4089         llist_for_each_entry(obj, freed, freed) {
4090                 struct i915_vma *vma, *vn;
4091
4092                 trace_i915_gem_object_destroy(obj);
4093
4094                 GEM_BUG_ON(i915_gem_object_is_active(obj));
4095                 list_for_each_entry_safe(vma, vn,
4096                                          &obj->vma_list, obj_link) {
4097                         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
4098                         GEM_BUG_ON(i915_vma_is_active(vma));
4099                         vma->flags &= ~I915_VMA_PIN_MASK;
4100                         i915_vma_close(vma);
4101                 }
4102                 GEM_BUG_ON(!list_empty(&obj->vma_list));
4103                 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
4104
4105                 list_del(&obj->global_link);
4106         }
4107         intel_runtime_pm_put(i915);
4108         mutex_unlock(&i915->drm.struct_mutex);
4109
4110         llist_for_each_entry_safe(obj, on, freed, freed) {
4111                 GEM_BUG_ON(obj->bind_count);
4112                 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
4113
4114                 if (obj->ops->release)
4115                         obj->ops->release(obj);
4116
4117                 if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
4118                         atomic_set(&obj->mm.pages_pin_count, 0);
4119                 __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
4120                 GEM_BUG_ON(obj->mm.pages);
4121
4122                 if (obj->base.import_attach)
4123                         drm_prime_gem_destroy(&obj->base, NULL);
4124
4125                 reservation_object_fini(&obj->__builtin_resv);
4126                 drm_gem_object_release(&obj->base);
4127                 i915_gem_info_remove_obj(i915, obj->base.size);
4128
4129                 kfree(obj->bit_17);
4130                 i915_gem_object_free(obj);
4131         }
4132 }
4133
4134 static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
4135 {
4136         struct llist_node *freed;
4137
4138         freed = llist_del_all(&i915->mm.free_list);
4139         if (unlikely(freed))
4140                 __i915_gem_free_objects(i915, freed);
4141 }
4142
4143 static void __i915_gem_free_work(struct work_struct *work)
4144 {
4145         struct drm_i915_private *i915 =
4146                 container_of(work, struct drm_i915_private, mm.free_work);
4147         struct llist_node *freed;
4148
4149         /* All file-owned VMA should have been released by this point through
4150          * i915_gem_close_object(), or earlier by i915_gem_context_close().
4151          * However, the object may also be bound into the global GTT (e.g.
4152          * older GPUs without per-process support, or for direct access through
4153          * the GTT either for the user or for scanout). Those VMA still need to
4154          * be unbound now.
4155          */
4156
4157         while ((freed = llist_del_all(&i915->mm.free_list)))
4158                 __i915_gem_free_objects(i915, freed);
4159 }
4160
4161 static void __i915_gem_free_object_rcu(struct rcu_head *head)
4162 {
4163         struct drm_i915_gem_object *obj =
4164                 container_of(head, typeof(*obj), rcu);
4165         struct drm_i915_private *i915 = to_i915(obj->base.dev);
4166
4167         /* We can't simply use call_rcu() from i915_gem_free_object()
4168          * as we need to block whilst unbinding, and the call_rcu
4169          * task may be called from softirq context. So we take a
4170          * detour through a worker.
4171          */
4172         if (llist_add(&obj->freed, &i915->mm.free_list))
4173                 schedule_work(&i915->mm.free_work);
4174 }
4175
4176 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4177 {
4178         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4179
4180         if (obj->mm.quirked)
4181                 __i915_gem_object_unpin_pages(obj);
4182
4183         if (discard_backing_storage(obj))
4184                 obj->mm.madv = I915_MADV_DONTNEED;
4185
4186         /* Before we free the object, make sure any pure RCU-only
4187          * read-side critical sections are complete, e.g.
4188          * i915_gem_busy_ioctl(). For the corresponding synchronized
4189          * lookup see i915_gem_object_lookup_rcu().
4190          */
4191         call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
4192 }
4193
4194 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
4195 {
4196         lockdep_assert_held(&obj->base.dev->struct_mutex);
4197
4198         GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
4199         if (i915_gem_object_is_active(obj))
4200                 i915_gem_object_set_active_reference(obj);
4201         else
4202                 i915_gem_object_put(obj);
4203 }
4204
4205 static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
4206 {
4207         struct intel_engine_cs *engine;
4208         enum intel_engine_id id;
4209
4210         for_each_engine(engine, dev_priv, id)
4211                 GEM_BUG_ON(engine->last_context != dev_priv->kernel_context);
4212 }
4213
4214 int i915_gem_suspend(struct drm_i915_private *dev_priv)
4215 {
4216         struct drm_device *dev = &dev_priv->drm;
4217         int ret;
4218
4219         intel_suspend_gt_powersave(dev_priv);
4220
4221         mutex_lock(&dev->struct_mutex);
4222
4223         /* We have to flush all the executing contexts to main memory so
4224          * that they can be saved in the hibernation image. To ensure the last
4225          * context image is coherent, we have to switch away from it. That
4226          * leaves the dev_priv->kernel_context still active when
4227          * we actually suspend, and its image in memory may not match the GPU
4228          * state. Fortunately, the kernel_context is disposable and we do
4229          * not rely on its state.
4230          */
4231         ret = i915_gem_switch_to_kernel_context(dev_priv);
4232         if (ret)
4233                 goto err;
4234
4235         ret = i915_gem_wait_for_idle(dev_priv,
4236                                      I915_WAIT_INTERRUPTIBLE |
4237                                      I915_WAIT_LOCKED);
4238         if (ret)
4239                 goto err;
4240
4241         i915_gem_retire_requests(dev_priv);
4242         GEM_BUG_ON(dev_priv->gt.active_requests);
4243
4244         assert_kernel_context_is_current(dev_priv);
4245         i915_gem_context_lost(dev_priv);
4246         mutex_unlock(&dev->struct_mutex);
4247
4248         cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4249         cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4250         flush_delayed_work(&dev_priv->gt.idle_work);
4251         flush_work(&dev_priv->mm.free_work);
4252
4253         /* Assert that we successfully flushed all the work and
4254          * reset the GPU back to its idle, low power state.
4255          */
4256         WARN_ON(dev_priv->gt.awake);
4257         WARN_ON(!intel_execlists_idle(dev_priv));
4258
4259         /*
4260          * Neither the BIOS, ourselves nor any other kernel
4261          * expects the system to be in execlists mode on startup,
4262          * so we need to reset the GPU back to legacy mode. And the only
4263          * known way to disable logical contexts is through a GPU reset.
4264          *
4265          * So in order to leave the system in a known default configuration,
4266          * always reset the GPU upon unload and suspend. Afterwards we then
4267          * clean up the GEM state tracking, flushing off the requests and
4268          * leaving the system in a known idle state.
4269          *
4270          * Note that it is of the utmost importance that the GPU is idle and
4271          * all stray writes are flushed *before* we dismantle the backing
4272          * storage for the pinned objects.
4273          *
4274          * However, since we are uncertain that resetting the GPU on older
4275          * machines is a good idea, we don't - just in case it leaves the
4276          * machine in an unusable condition.
4277          */
4278         if (HAS_HW_CONTEXTS(dev_priv)) {
4279                 int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
4280                 WARN_ON(reset && reset != -ENODEV);
4281         }
4282
4283         return 0;
4284
4285 err:
4286         mutex_unlock(&dev->struct_mutex);
4287         return ret;
4288 }
4289
4290 void i915_gem_resume(struct drm_i915_private *dev_priv)
4291 {
4292         struct drm_device *dev = &dev_priv->drm;
4293
4294         WARN_ON(dev_priv->gt.awake);
4295
4296         mutex_lock(&dev->struct_mutex);
4297         i915_gem_restore_gtt_mappings(dev_priv);
4298
4299         /* As we didn't flush the kernel context before suspend, we cannot
4300          * guarantee that the context image is complete. So let's just reset
4301          * it and start again.
4302          */
4303         dev_priv->gt.resume(dev_priv);
4304
4305         mutex_unlock(&dev->struct_mutex);
4306 }
4307
4308 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
4309 {
4310         if (INTEL_GEN(dev_priv) < 5 ||
4311             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4312                 return;
4313
4314         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4315                                  DISP_TILE_SURFACE_SWIZZLING);
4316
4317         if (IS_GEN5(dev_priv))
4318                 return;
4319
4320         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4321         if (IS_GEN6(dev_priv))
4322                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4323         else if (IS_GEN7(dev_priv))
4324                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4325         else if (IS_GEN8(dev_priv))
4326                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4327         else
4328                 BUG();
4329 }
4330
4331 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
4332 {
4333         I915_WRITE(RING_CTL(base), 0);
4334         I915_WRITE(RING_HEAD(base), 0);
4335         I915_WRITE(RING_TAIL(base), 0);
4336         I915_WRITE(RING_START(base), 0);
4337 }
4338
4339 static void init_unused_rings(struct drm_i915_private *dev_priv)
4340 {
4341         if (IS_I830(dev_priv)) {
4342                 init_unused_ring(dev_priv, PRB1_BASE);
4343                 init_unused_ring(dev_priv, SRB0_BASE);
4344                 init_unused_ring(dev_priv, SRB1_BASE);
4345                 init_unused_ring(dev_priv, SRB2_BASE);
4346                 init_unused_ring(dev_priv, SRB3_BASE);
4347         } else if (IS_GEN2(dev_priv)) {
4348                 init_unused_ring(dev_priv, SRB0_BASE);
4349                 init_unused_ring(dev_priv, SRB1_BASE);
4350         } else if (IS_GEN3(dev_priv)) {
4351                 init_unused_ring(dev_priv, PRB1_BASE);
4352                 init_unused_ring(dev_priv, PRB2_BASE);
4353         }
4354 }
4355
4356 int
4357 i915_gem_init_hw(struct drm_i915_private *dev_priv)
4358 {
4359         struct intel_engine_cs *engine;
4360         enum intel_engine_id id;
4361         int ret;
4362
4363         dev_priv->gt.last_init_time = ktime_get();
4364
4365         /* Double layer security blanket, see i915_gem_init() */
4366         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4367
4368         if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
4369                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4370
4371         if (IS_HASWELL(dev_priv))
4372                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
4373                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4374
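             /* PCH-less (PCH_NOP) systems have no PCH to handshake with,
              * so stop the GPU from waiting for PCH FLR/reset
              * acknowledgements that will never arrive.
              */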
4375         if (HAS_PCH_NOP(dev_priv)) {
4376                 if (IS_IVYBRIDGE(dev_priv)) {
4377                         u32 temp = I915_READ(GEN7_MSG_CTL);
4378                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4379                         I915_WRITE(GEN7_MSG_CTL, temp);
4380                 } else if (INTEL_GEN(dev_priv) >= 7) {
4381                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4382                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4383                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4384                 }
4385         }
4386
4387         i915_gem_init_swizzling(dev_priv);
4388
4389         /*
4390          * At least 830 can leave some of the unused rings
4391          * "active" (ie. head != tail) after resume which
4392          * will prevent C3 entry. Make sure all unused rings
4393          * are totally idle.
4394          */
4395         init_unused_rings(dev_priv);
4396
4397         BUG_ON(!dev_priv->kernel_context);
4398
4399         ret = i915_ppgtt_init_hw(dev_priv);
4400         if (ret) {
4401                 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4402                 goto out;
4403         }
4404
4405         /* Need to do basic initialisation of all rings first: */
4406         for_each_engine(engine, dev_priv, id) {
4407                 ret = engine->init_hw(engine);
4408                 if (ret)
4409                         goto out;
4410         }
4411
4412         intel_mocs_init_l3cc_table(dev_priv);
4413
4414         /* We can't enable contexts until all firmware is loaded */
4415         ret = intel_guc_setup(dev_priv);
4416         if (ret)
4417                 goto out;
4418
4419 out:
4420         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4421         return ret;
4422 }
4423
4424 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4425 {
4426         if (INTEL_INFO(dev_priv)->gen < 6)
4427                 return false;
4428
4429         /* TODO: make semaphores and Execlists play nicely together */
4430         if (i915.enable_execlists)
4431                 return false;
4432
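             /* An explicit enable/disable request wins; a negative value
              * falls through to the auto-detection below.
              */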
4433         if (value >= 0)
4434                 return value;
4435
4436 #ifdef CONFIG_INTEL_IOMMU
4437         /* Enable semaphores on SNB when IO remapping is off */
4438         if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
4439                 return false;
4440 #endif
4441
4442         return true;
4443 }
4444
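     /* One-time GEM initialisation: select the submission backend
      * (execlists or legacy ringbuffers), then set up the GGTT, contexts
      * and engines before bringing up the hardware.
      */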
4445 int i915_gem_init(struct drm_i915_private *dev_priv)
4446 {
4447         int ret;
4448
4449         mutex_lock(&dev_priv->drm.struct_mutex);
4450
4451         if (!i915.enable_execlists) {
4452                 dev_priv->gt.resume = intel_legacy_submission_resume;
4453                 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
4454         } else {
4455                 dev_priv->gt.resume = intel_lr_context_resume;
4456                 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4457         }
4458
4459         /* This is just a security blanket to placate dragons.
4460          * On some systems, we very sporadically observe that the first TLBs
4461          * used by the CS may be stale, despite us poking the TLB reset. If
4462          * we hold the forcewake during initialisation these problems
4463          * just magically go away.
4464          */
4465         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4466
4467         i915_gem_init_userptr(dev_priv);
4468
4469         ret = i915_gem_init_ggtt(dev_priv);
4470         if (ret)
4471                 goto out_unlock;
4472
4473         ret = i915_gem_context_init(dev_priv);
4474         if (ret)
4475                 goto out_unlock;
4476
4477         ret = intel_engines_init(dev_priv);
4478         if (ret)
4479                 goto out_unlock;
4480
4481         ret = i915_gem_init_hw(dev_priv);
4482         if (ret == -EIO) {
4483                 /* Allow engine initialisation to fail by marking the GPU as
4484                  * wedged. But we only want to do this where the GPU is angry,
4485                  * for all other failures, such as an allocation failure, bail.
4486                  */
4487                 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4488                 i915_gem_set_wedged(dev_priv);
4489                 ret = 0;
4490         }
4491
4492 out_unlock:
4493         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4494         mutex_unlock(&dev_priv->drm.struct_mutex);
4495
4496         return ret;
4497 }
4498
4499 void
4500 i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
4501 {
4502         struct intel_engine_cs *engine;
4503         enum intel_engine_id id;
4504
4505         for_each_engine(engine, dev_priv, id)
4506                 dev_priv->gt.cleanup_engine(engine);
4507 }
4508
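     /* The number of fence registers depends on the platform: 32 on gen7+
      * (except VLV/CHV), 16 on gen4+ and the later gen3 parts, 8 on
      * everything older. A vGPU guest is limited to the fences the host
      * has assigned to it.
      */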
4509 void
4510 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
4511 {
4512         int i;
4513
4514         if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
4515             !IS_CHERRYVIEW(dev_priv))
4516                 dev_priv->num_fence_regs = 32;
4517         else if (INTEL_INFO(dev_priv)->gen >= 4 ||
4518                  IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
4519                  IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
4520                 dev_priv->num_fence_regs = 16;
4521         else
4522                 dev_priv->num_fence_regs = 8;
4523
4524         if (intel_vgpu_active(dev_priv))
4525                 dev_priv->num_fence_regs =
4526                                 I915_READ(vgtif_reg(avail_rs.fence_num));
4527
4528         /* Initialize fence registers to zero */
4529         for (i = 0; i < dev_priv->num_fence_regs; i++) {
4530                 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
4531
4532                 fence->i915 = dev_priv;
4533                 fence->id = i;
4534                 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
4535         }
4536         i915_gem_restore_fences(dev_priv);
4537
4538         i915_gem_detect_bit_6_swizzle(dev_priv);
4539 }
4540
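     /* One-time allocation of the slab caches, lists and workers used by
      * GEM; undone by i915_gem_load_cleanup().
      */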
4541 int
4542 i915_gem_load_init(struct drm_i915_private *dev_priv)
4543 {
4544         int err = -ENOMEM;
4545
4546         dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
4547         if (!dev_priv->objects)
4548                 goto err_out;
4549
4550         dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
4551         if (!dev_priv->vmas)
4552                 goto err_objects;
4553
4554         dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
4555                                         SLAB_HWCACHE_ALIGN |
4556                                         SLAB_RECLAIM_ACCOUNT |
4557                                         SLAB_DESTROY_BY_RCU);
4558         if (!dev_priv->requests)
4559                 goto err_vmas;
4560
4561         dev_priv->dependencies = KMEM_CACHE(i915_dependency,
4562                                             SLAB_HWCACHE_ALIGN |
4563                                             SLAB_RECLAIM_ACCOUNT);
4564         if (!dev_priv->dependencies)
4565                 goto err_requests;
4566
4567         mutex_lock(&dev_priv->drm.struct_mutex);
4568         INIT_LIST_HEAD(&dev_priv->gt.timelines);
4569         err = i915_gem_timeline_init__global(dev_priv);
4570         mutex_unlock(&dev_priv->drm.struct_mutex);
4571         if (err)
4572                 goto err_dependencies;
4573
4574         INIT_LIST_HEAD(&dev_priv->context_list);
4575         INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
4576         init_llist_head(&dev_priv->mm.free_list);
4577         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4578         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4579         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4580         INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
4581         INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
4582                           i915_gem_retire_work_handler);
4583         INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
4584                           i915_gem_idle_work_handler);
4585         init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
4586         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4587
4588         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4589
4590         init_waitqueue_head(&dev_priv->pending_flip_queue);
4591
4592         dev_priv->mm.interruptible = true;
4593
4594         atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
4595
4596         spin_lock_init(&dev_priv->fb_tracking.lock);
4597
4598         return 0;
4599
4600 err_dependencies:
4601         kmem_cache_destroy(dev_priv->dependencies);
4602 err_requests:
4603         kmem_cache_destroy(dev_priv->requests);
4604 err_vmas:
4605         kmem_cache_destroy(dev_priv->vmas);
4606 err_objects:
4607         kmem_cache_destroy(dev_priv->objects);
4608 err_out:
4609         return err;
4610 }
4611
4612 void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
4613 {
4614         WARN_ON(!llist_empty(&dev_priv->mm.free_list));
4615
4616         mutex_lock(&dev_priv->drm.struct_mutex);
4617         i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
4618         WARN_ON(!list_empty(&dev_priv->gt.timelines));
4619         mutex_unlock(&dev_priv->drm.struct_mutex);
4620
4621         kmem_cache_destroy(dev_priv->dependencies);
4622         kmem_cache_destroy(dev_priv->requests);
4623         kmem_cache_destroy(dev_priv->vmas);
4624         kmem_cache_destroy(dev_priv->objects);
4625
4626         /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
4627         rcu_barrier();
4628 }
4629
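     /* Drop as many object pages as possible ahead of hibernation so that
      * less state needs to be written into (and restored from) the
      * hibernation image; see also i915_gem_freeze_late() below.
      */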
4630 int i915_gem_freeze(struct drm_i915_private *dev_priv)
4631 {
4632         intel_runtime_pm_get(dev_priv);
4633
4634         mutex_lock(&dev_priv->drm.struct_mutex);
4635         i915_gem_shrink_all(dev_priv);
4636         mutex_unlock(&dev_priv->drm.struct_mutex);
4637
4638         intel_runtime_pm_put(dev_priv);
4639
4640         return 0;
4641 }
4642
4643 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
4644 {
4645         struct drm_i915_gem_object *obj;
4646         struct list_head *phases[] = {
4647                 &dev_priv->mm.unbound_list,
4648                 &dev_priv->mm.bound_list,
4649                 NULL
4650         }, **p;
4651
4652         /* Called just before we write the hibernation image.
4653          *
4654          * We need to update the domain tracking to reflect that the CPU
4655          * will be accessing all the pages to create and restore from the
4656          * hibernation, and so upon restoration those pages will be in the
4657          * CPU domain.
4658          *
4659          * To make sure the hibernation image contains the latest state,
4660          * we update that state just before writing out the image.
4661          *
4662          * To try and reduce the hibernation image, we manually shrink
4663          * the objects as well.
4664          */
4665
4666         mutex_lock(&dev_priv->drm.struct_mutex);
4667         i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
4668
4669         for (p = phases; *p; p++) {
4670                 list_for_each_entry(obj, *p, global_link) {
4671                         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4672                         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4673                 }
4674         }
4675         mutex_unlock(&dev_priv->drm.struct_mutex);
4676
4677         return 0;
4678 }
4679
4680 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4681 {
4682         struct drm_i915_file_private *file_priv = file->driver_priv;
4683         struct drm_i915_gem_request *request;
4684
4685         /* Clean up our request list when the client is going away, so that
4686          * later retire_requests won't dereference our soon-to-be-gone
4687          * file_priv.
4688          */
4689         spin_lock(&file_priv->mm.lock);
4690         list_for_each_entry(request, &file_priv->mm.request_list, client_list)
4691                 request->file_priv = NULL;
4692         spin_unlock(&file_priv->mm.lock);
4693
4694         if (!list_empty(&file_priv->rps.link)) {
4695                 spin_lock(&to_i915(dev)->rps.client_lock);
4696                 list_del(&file_priv->rps.link);
4697                 spin_unlock(&to_i915(dev)->rps.client_lock);
4698         }
4699 }
4700
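     /* Set up the per-client GEM state (request tracking, RPS boost link,
      * default context) for a newly opened file handle.
      */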
4701 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4702 {
4703         struct drm_i915_file_private *file_priv;
4704         int ret;
4705
4706         DRM_DEBUG("\n");
4707
4708         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4709         if (!file_priv)
4710                 return -ENOMEM;
4711
4712         file->driver_priv = file_priv;
4713         file_priv->dev_priv = to_i915(dev);
4714         file_priv->file = file;
4715         INIT_LIST_HEAD(&file_priv->rps.link);
4716
4717         spin_lock_init(&file_priv->mm.lock);
4718         INIT_LIST_HEAD(&file_priv->mm.request_list);
4719
4720         file_priv->bsd_engine = -1;
4721
4722         ret = i915_gem_context_open(dev, file);
4723         if (ret)
4724                 kfree(file_priv);
4725
4726         return ret;
4727 }
4728
4729 /**
4730  * i915_gem_track_fb - update frontbuffer tracking
4731  * @old: current GEM buffer for the frontbuffer slots
4732  * @new: new GEM buffer for the frontbuffer slots
4733  * @frontbuffer_bits: bitmask of frontbuffer slots
4734  *
4735  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
4736  * from @old and setting them in @new. Both @old and @new can be NULL.
4737  */
4738 void i915_gem_track_fb(struct drm_i915_gem_object *old,
4739                        struct drm_i915_gem_object *new,
4740                        unsigned frontbuffer_bits)
4741 {
4742         /* Control of individual bits within the mask is guarded by
4743          * the owning plane->mutex, i.e. we can never see concurrent
4744          * manipulation of individual bits. But since the bitfield as a whole
4745          * is updated using RMW, we need to use atomics in order to update
4746          * the bits.
4747          */
4748         BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
4749                      sizeof(atomic_t) * BITS_PER_BYTE);
4750
4751         if (old) {
4752                 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
4753                 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
4754         }
4755
4756         if (new) {
4757                 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
4758                 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
4759         }
4760 }
4761
4762 /* Allocate a new GEM object and fill it with the supplied data */
4763 struct drm_i915_gem_object *
4764 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
4765                                  const void *data, size_t size)
4766 {
4767         struct drm_i915_gem_object *obj;
4768         struct sg_table *sg;
4769         size_t bytes;
4770         int ret;
4771
4772         obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
4773         if (IS_ERR(obj))
4774                 return obj;
4775
4776         ret = i915_gem_object_set_to_cpu_domain(obj, true);
4777         if (ret)
4778                 goto fail;
4779
4780         ret = i915_gem_object_pin_pages(obj);
4781         if (ret)
4782                 goto fail;
4783
4784         sg = obj->mm.pages;
4785         bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
4786         obj->mm.dirty = true; /* Backing store is now out of date */
4787         i915_gem_object_unpin_pages(obj);
4788
4789         if (WARN_ON(bytes != size)) {
4790                 DRM_ERROR("Incomplete copy, wrote %zu of %zu\n", bytes, size);
4791                 ret = -EFAULT;
4792                 goto fail;
4793         }
4794
4795         return obj;
4796
4797 fail:
4798         i915_gem_object_put(obj);
4799         return ERR_PTR(ret);
4800 }
4801
4802 struct scatterlist *
4803 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
4804                        unsigned int n,
4805                        unsigned int *offset)
4806 {
4807         struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
4808         struct scatterlist *sg;
4809         unsigned int idx, count;
4810
4811         might_sleep();
4812         GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
4813         GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
4814
4815         /* As we iterate forward through the sg, we record each entry in a
4816          * radixtree for quick repeated (backwards) lookups. If we have seen
4817          * this index previously, we will have an entry for it.
4818          *
4819          * Initial lookup is O(N), but this is amortized to O(1) for
4820          * sequential page access (where each new request is consecutive
4821          * to the previous one). Repeated lookups are O(lg(obj->base.size)),
4822          * i.e. O(1) with a large constant!
4823          */
4824         if (n < READ_ONCE(iter->sg_idx))
4825                 goto lookup;
4826
4827         mutex_lock(&iter->lock);
4828
4829         /* We prefer to reuse the last sg so that repeated lookups of this
4830          * (or the subsequent) sg are fast - comparing against the last
4831          * sg is faster than going through the radixtree.
4832          */
4833
4834         sg = iter->sg_pos;
4835         idx = iter->sg_idx;
4836         count = __sg_page_count(sg);
4837
4838         while (idx + count <= n) {
4839                 unsigned long exception, i;
4840                 int ret;
4841
4842                 /* If we cannot allocate and insert this entry, or the
4843                  * individual pages from this range, cancel updating the
4844                  * sg_idx so that on this lookup we are forced to linearly
4845                  * scan onwards, but on future lookups we will try the
4846                  * insertion again (in which case we need to be careful of
4847                  * the error return reporting that we have already inserted
4848                  * this index).
4849                  */
4850                 ret = radix_tree_insert(&iter->radix, idx, sg);
4851                 if (ret && ret != -EEXIST)
4852                         goto scan;
4853
4854                 exception =
4855                         RADIX_TREE_EXCEPTIONAL_ENTRY |
4856                         idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
4857                 for (i = 1; i < count; i++) {
4858                         ret = radix_tree_insert(&iter->radix, idx + i,
4859                                                 (void *)exception);
4860                         if (ret && ret != -EEXIST)
4861                                 goto scan;
4862                 }
4863
4864                 idx += count;
4865                 sg = ____sg_next(sg);
4866                 count = __sg_page_count(sg);
4867         }
4868
4869 scan:
4870         iter->sg_pos = sg;
4871         iter->sg_idx = idx;
4872
4873         mutex_unlock(&iter->lock);
4874
4875         if (unlikely(n < idx)) /* insertion completed by another thread */
4876                 goto lookup;
4877
4878         /* In case we failed to insert the entry into the radixtree, we need
4879          * to look beyond the current sg.
4880          */
4881         while (idx + count <= n) {
4882                 idx += count;
4883                 sg = ____sg_next(sg);
4884                 count = __sg_page_count(sg);
4885         }
4886
4887         *offset = n - idx;
4888         return sg;
4889
4890 lookup:
4891         rcu_read_lock();
4892
4893         sg = radix_tree_lookup(&iter->radix, n);
4894         GEM_BUG_ON(!sg);
4895
4896         /* If this index is in the middle of a multi-page sg entry,
4897          * the radixtree will contain an exceptional entry that points
4898          * to the start of that range. We will return the pointer to
4899          * the base page and the offset of this page within the
4900          * sg entry's range.
4901          */
4902         *offset = 0;
4903         if (unlikely(radix_tree_exception(sg))) {
4904                 unsigned long base =
4905                         (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
4906
4907                 sg = radix_tree_lookup(&iter->radix, base);
4908                 GEM_BUG_ON(!sg);
4909
4910                 *offset = n - base;
4911         }
4912
4913         rcu_read_unlock();
4914
4915         return sg;
4916 }
4917
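     /* Return the struct page backing page index @n of the object; only
      * valid while the object's backing pages are pinned.
      */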
4918 struct page *
4919 i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
4920 {
4921         struct scatterlist *sg;
4922         unsigned int offset;
4923
4924         GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
4925
4926         sg = i915_gem_object_get_sg(obj, n, &offset);
4927         return nth_page(sg_page(sg), offset);
4928 }
4929
4930 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
4931 struct page *
4932 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
4933                                unsigned int n)
4934 {
4935         struct page *page;
4936
4937         page = i915_gem_object_get_page(obj, n);
4938         if (!obj->mm.dirty)
4939                 set_page_dirty(page);
4940
4941         return page;
4942 }
4943
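     /* Like i915_gem_object_get_page(), but return the DMA address of
      * page index @n rather than its struct page.
      */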
4944 dma_addr_t
4945 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
4946                                 unsigned long n)
4947 {
4948         struct scatterlist *sg;
4949         unsigned int offset;
4950
4951         sg = i915_gem_object_get_sg(obj, n, &offset);
4952         return sg_dma_address(sg) + (offset << PAGE_SHIFT);
4953 }