git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/gpu/drm/i915/i915_gem.c
drm/i915: Fallback to single PAGE_SIZE segments for DMA remapping
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_vgpu.h"
33 #include "i915_trace.h"
34 #include "intel_drv.h"
35 #include "intel_frontbuffer.h"
36 #include "intel_mocs.h"
37 #include <linux/dma-fence-array.h>
38 #include <linux/reservation.h>
39 #include <linux/shmem_fs.h>
40 #include <linux/slab.h>
41 #include <linux/stop_machine.h>
42 #include <linux/swap.h>
43 #include <linux/pci.h>
44 #include <linux/dma-buf.h>
45
46 static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
47 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
48 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
49
50 static bool cpu_cache_is_coherent(struct drm_device *dev,
51                                   enum i915_cache_level level)
52 {
53         return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE;
54 }
55
56 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
57 {
58         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
59                 return false;
60
61         if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
62                 return true;
63
64         return obj->pin_display;
65 }
66
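/* Reserve a temporary node in the CPU-mappable range of the global GTT.
 * The pread/pwrite fallback paths below use such a node to map object
 * pages through the aperture one PAGE_SIZE chunk at a time when the whole
 * object cannot be pinned into the mappable region.
 */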
67 static int
68 insert_mappable_node(struct i915_ggtt *ggtt,
69                      struct drm_mm_node *node, u32 size)
70 {
71         memset(node, 0, sizeof(*node));
72         return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
73                                                    size, 0,
74                                                    I915_COLOR_UNEVICTABLE,
75                                                    0, ggtt->mappable_end,
76                                                    DRM_MM_SEARCH_DEFAULT,
77                                                    DRM_MM_CREATE_DEFAULT);
78 }
79
80 static void
81 remove_mappable_node(struct drm_mm_node *node)
82 {
83         drm_mm_remove_node(node);
84 }
85
86 /* some bookkeeping */
87 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
88                                   u64 size)
89 {
90         spin_lock(&dev_priv->mm.object_stat_lock);
91         dev_priv->mm.object_count++;
92         dev_priv->mm.object_memory += size;
93         spin_unlock(&dev_priv->mm.object_stat_lock);
94 }
95
96 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
97                                      u64 size)
98 {
99         spin_lock(&dev_priv->mm.object_stat_lock);
100         dev_priv->mm.object_count--;
101         dev_priv->mm.object_memory -= size;
102         spin_unlock(&dev_priv->mm.object_stat_lock);
103 }
104
105 static int
106 i915_gem_wait_for_error(struct i915_gpu_error *error)
107 {
108         int ret;
109
110         might_sleep();
111
112         if (!i915_reset_in_progress(error))
113                 return 0;
114
115         /*
116          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
117          * userspace. If it takes that long something really bad is going on and
118          * we should simply try to bail out and fail as gracefully as possible.
119          */
120         ret = wait_event_interruptible_timeout(error->reset_queue,
121                                                !i915_reset_in_progress(error),
122                                                I915_RESET_TIMEOUT);
123         if (ret == 0) {
124                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
125                 return -EIO;
126         } else if (ret < 0) {
127                 return ret;
128         } else {
129                 return 0;
130         }
131 }
132
133 int i915_mutex_lock_interruptible(struct drm_device *dev)
134 {
135         struct drm_i915_private *dev_priv = to_i915(dev);
136         int ret;
137
138         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
139         if (ret)
140                 return ret;
141
142         ret = mutex_lock_interruptible(&dev->struct_mutex);
143         if (ret)
144                 return ret;
145
146         return 0;
147 }
148
149 int
150 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
151                             struct drm_file *file)
152 {
153         struct drm_i915_private *dev_priv = to_i915(dev);
154         struct i915_ggtt *ggtt = &dev_priv->ggtt;
155         struct drm_i915_gem_get_aperture *args = data;
156         struct i915_vma *vma;
157         size_t pinned;
158
159         pinned = 0;
160         mutex_lock(&dev->struct_mutex);
161         list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
162                 if (i915_vma_is_pinned(vma))
163                         pinned += vma->node.size;
164         list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
165                 if (i915_vma_is_pinned(vma))
166                         pinned += vma->node.size;
167         mutex_unlock(&dev->struct_mutex);
168
169         args->aper_size = ggtt->base.total;
170         args->aper_available_size = args->aper_size - pinned;
171
172         return 0;
173 }
174
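/* Backing-store acquisition for "phys" objects: copy every shmemfs page
 * into one physically contiguous DMA buffer (drm_pci_alloc), flush it out
 * of the CPU caches, and describe the whole buffer with a single-entry
 * sg_table whose dma address is the buffer's bus address.
 */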
175 static struct sg_table *
176 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
177 {
178         struct address_space *mapping = obj->base.filp->f_mapping;
179         drm_dma_handle_t *phys;
180         struct sg_table *st;
181         struct scatterlist *sg;
182         char *vaddr;
183         int i;
184
185         if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
186                 return ERR_PTR(-EINVAL);
187
188         /* Always aligning to the object size allows a single allocation
189          * to handle all possible callers, and given typical object sizes,
190          * the alignment of the buddy allocation will naturally match.
191          */
192         phys = drm_pci_alloc(obj->base.dev,
193                              obj->base.size,
194                              roundup_pow_of_two(obj->base.size));
195         if (!phys)
196                 return ERR_PTR(-ENOMEM);
197
198         vaddr = phys->vaddr;
199         for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
200                 struct page *page;
201                 char *src;
202
203                 page = shmem_read_mapping_page(mapping, i);
204                 if (IS_ERR(page)) {
205                         st = ERR_CAST(page);
206                         goto err_phys;
207                 }
208
209                 src = kmap_atomic(page);
210                 memcpy(vaddr, src, PAGE_SIZE);
211                 drm_clflush_virt_range(vaddr, PAGE_SIZE);
212                 kunmap_atomic(src);
213
214                 put_page(page);
215                 vaddr += PAGE_SIZE;
216         }
217
218         i915_gem_chipset_flush(to_i915(obj->base.dev));
219
220         st = kmalloc(sizeof(*st), GFP_KERNEL);
221         if (!st) {
222                 st = ERR_PTR(-ENOMEM);
223                 goto err_phys;
224         }
225
226         if (sg_alloc_table(st, 1, GFP_KERNEL)) {
227                 kfree(st);
228                 st = ERR_PTR(-ENOMEM);
229                 goto err_phys;
230         }
231
232         sg = st->sgl;
233         sg->offset = 0;
234         sg->length = obj->base.size;
235
236         sg_dma_address(sg) = phys->busaddr;
237         sg_dma_len(sg) = obj->base.size;
238
239         obj->phys_handle = phys;
240         return st;
241
242 err_phys:
243         drm_pci_free(obj->base.dev, phys);
244         return st;
245 }
246
247 static void
248 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
249                                 struct sg_table *pages)
250 {
251         GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
252
253         if (obj->mm.madv == I915_MADV_DONTNEED)
254                 obj->mm.dirty = false;
255
256         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
257             !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
258                 drm_clflush_sg(pages);
259
260         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
261         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
262 }
263
264 static void
265 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
266                                struct sg_table *pages)
267 {
268         __i915_gem_object_release_shmem(obj, pages);
269
270         if (obj->mm.dirty) {
271                 struct address_space *mapping = obj->base.filp->f_mapping;
272                 char *vaddr = obj->phys_handle->vaddr;
273                 int i;
274
275                 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
276                         struct page *page;
277                         char *dst;
278
279                         page = shmem_read_mapping_page(mapping, i);
280                         if (IS_ERR(page))
281                                 continue;
282
283                         dst = kmap_atomic(page);
284                         drm_clflush_virt_range(vaddr, PAGE_SIZE);
285                         memcpy(dst, vaddr, PAGE_SIZE);
286                         kunmap_atomic(dst);
287
288                         set_page_dirty(page);
289                         if (obj->mm.madv == I915_MADV_WILLNEED)
290                                 mark_page_accessed(page);
291                         put_page(page);
292                         vaddr += PAGE_SIZE;
293                 }
294                 obj->mm.dirty = false;
295         }
296
297         sg_free_table(pages);
298         kfree(pages);
299
300         drm_pci_free(obj->base.dev, obj->phys_handle);
301 }
302
303 static void
304 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
305 {
306         i915_gem_object_unpin_pages(obj);
307 }
308
309 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
310         .get_pages = i915_gem_object_get_pages_phys,
311         .put_pages = i915_gem_object_put_pages_phys,
312         .release = i915_gem_object_release_phys,
313 };
314
315 int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
316 {
317         struct i915_vma *vma;
318         LIST_HEAD(still_in_list);
319         int ret;
320
321         lockdep_assert_held(&obj->base.dev->struct_mutex);
322
323         /* Closed vma are removed from the obj->vma_list, but they may
324          * still have an active binding on the object. To remove those we
325          * must wait for all rendering to the object to complete (as
326          * unbinding must do anyway), and then retire the requests.
327          */
328         ret = i915_gem_object_wait(obj,
329                                    I915_WAIT_INTERRUPTIBLE |
330                                    I915_WAIT_LOCKED |
331                                    I915_WAIT_ALL,
332                                    MAX_SCHEDULE_TIMEOUT,
333                                    NULL);
334         if (ret)
335                 return ret;
336
337         i915_gem_retire_requests(to_i915(obj->base.dev));
338
339         while ((vma = list_first_entry_or_null(&obj->vma_list,
340                                                struct i915_vma,
341                                                obj_link))) {
342                 list_move_tail(&vma->obj_link, &still_in_list);
343                 ret = i915_vma_unbind(vma);
344                 if (ret)
345                         break;
346         }
347         list_splice(&still_in_list, &obj->vma_list);
348
349         return ret;
350 }
351
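/* Wait on a single dma_fence. Foreign fences are passed straight to
 * dma_fence_wait_timeout(); i915 requests additionally get an optional
 * RPS waitboost and, when waited upon under the struct_mutex
 * (I915_WAIT_LOCKED), are retired once completed.
 */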
352 static long
353 i915_gem_object_wait_fence(struct dma_fence *fence,
354                            unsigned int flags,
355                            long timeout,
356                            struct intel_rps_client *rps)
357 {
358         struct drm_i915_gem_request *rq;
359
360         BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
361
362         if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
363                 return timeout;
364
365         if (!dma_fence_is_i915(fence))
366                 return dma_fence_wait_timeout(fence,
367                                               flags & I915_WAIT_INTERRUPTIBLE,
368                                               timeout);
369
370         rq = to_request(fence);
371         if (i915_gem_request_completed(rq))
372                 goto out;
373
374         /* This client is about to stall waiting for the GPU. In many cases
375          * this is undesirable and limits the throughput of the system, as
376          * many clients cannot continue processing user input/output whilst
377          * blocked. RPS autotuning may take tens of milliseconds to respond
378          * to the GPU load and thus incurs additional latency for the client.
379          * We can circumvent that by promoting the GPU frequency to maximum
380          * before we wait. This makes the GPU throttle up much more quickly
381          * (good for benchmarks and user experience, e.g. window animations),
382          * but at a cost of spending more power processing the workload
383          * (bad for battery). Not all clients even want their results
384          * immediately and for them we should just let the GPU select its own
385          * frequency to maximise efficiency. To prevent a single client from
386          * forcing the clocks too high for the whole system, we only allow
387          * each client to waitboost once in a busy period.
388          */
389         if (rps) {
390                 if (INTEL_GEN(rq->i915) >= 6)
391                         gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
392                 else
393                         rps = NULL;
394         }
395
396         timeout = i915_wait_request(rq, flags, timeout);
397
398 out:
399         if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
400                 i915_gem_request_retire_upto(rq);
401
402         if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) {
403                 /* The GPU is now idle and this client has stalled.
404                  * Since no other client has submitted a request in the
405                  * meantime, assume that this client is the only one
406                  * supplying work to the GPU but is unable to keep that
407                  * work supplied because it is waiting. Since the GPU is
408                  * then never kept fully busy, RPS autoclocking will
409                  * keep the clocks relatively low, causing further delays.
410                  * Compensate by giving the synchronous client credit for
411                  * a waitboost next time.
412                  */
413                 spin_lock(&rq->i915->rps.client_lock);
414                 list_del_init(&rps->link);
415                 spin_unlock(&rq->i915->rps.client_lock);
416         }
417
418         return timeout;
419 }
420
421 static long
422 i915_gem_object_wait_reservation(struct reservation_object *resv,
423                                  unsigned int flags,
424                                  long timeout,
425                                  struct intel_rps_client *rps)
426 {
427         struct dma_fence *excl;
428
429         if (flags & I915_WAIT_ALL) {
430                 struct dma_fence **shared;
431                 unsigned int count, i;
432                 int ret;
433
434                 ret = reservation_object_get_fences_rcu(resv,
435                                                         &excl, &count, &shared);
436                 if (ret)
437                         return ret;
438
439                 for (i = 0; i < count; i++) {
440                         timeout = i915_gem_object_wait_fence(shared[i],
441                                                              flags, timeout,
442                                                              rps);
443                         if (timeout <= 0)
444                                 break;
445
446                         dma_fence_put(shared[i]);
447                 }
448
449                 for (; i < count; i++)
450                         dma_fence_put(shared[i]);
451                 kfree(shared);
452         } else {
453                 excl = reservation_object_get_excl_rcu(resv);
454         }
455
456         if (excl && timeout > 0)
457                 timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
458
459         dma_fence_put(excl);
460
461         return timeout;
462 }
463
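/* Raise the scheduler priority of the request backing an i915 fence, if
 * its engine provides a schedule() hook. Non-i915 fences are ignored.
 */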
464 static void __fence_set_priority(struct dma_fence *fence, int prio)
465 {
466         struct drm_i915_gem_request *rq;
467         struct intel_engine_cs *engine;
468
469         if (!dma_fence_is_i915(fence))
470                 return;
471
472         rq = to_request(fence);
473         engine = rq->engine;
474         if (!engine->schedule)
475                 return;
476
477         engine->schedule(rq, prio);
478 }
479
480 static void fence_set_priority(struct dma_fence *fence, int prio)
481 {
482         /* Recurse once into a fence-array */
483         if (dma_fence_is_array(fence)) {
484                 struct dma_fence_array *array = to_dma_fence_array(fence);
485                 int i;
486
487                 for (i = 0; i < array->num_fences; i++)
488                         __fence_set_priority(array->fences[i], prio);
489         } else {
490                 __fence_set_priority(fence, prio);
491         }
492 }
493
494 int
495 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
496                               unsigned int flags,
497                               int prio)
498 {
499         struct dma_fence *excl;
500
501         if (flags & I915_WAIT_ALL) {
502                 struct dma_fence **shared;
503                 unsigned int count, i;
504                 int ret;
505
506                 ret = reservation_object_get_fences_rcu(obj->resv,
507                                                         &excl, &count, &shared);
508                 if (ret)
509                         return ret;
510
511                 for (i = 0; i < count; i++) {
512                         fence_set_priority(shared[i], prio);
513                         dma_fence_put(shared[i]);
514                 }
515
516                 kfree(shared);
517         } else {
518                 excl = reservation_object_get_excl_rcu(obj->resv);
519         }
520
521         if (excl) {
522                 fence_set_priority(excl, prio);
523                 dma_fence_put(excl);
524         }
525         return 0;
526 }
527
528 /**
529  * Waits for rendering to the object to be completed
530  * @obj: i915 gem object
531  * @flags: how to wait (under a lock, for all rendering or just for writes etc)
532  * @timeout: how long to wait
533  * @rps: client (user process) to charge for any waitboosting
534  */
535 int
536 i915_gem_object_wait(struct drm_i915_gem_object *obj,
537                      unsigned int flags,
538                      long timeout,
539                      struct intel_rps_client *rps)
540 {
541         might_sleep();
542 #if IS_ENABLED(CONFIG_LOCKDEP)
543         GEM_BUG_ON(debug_locks &&
544                    !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
545                    !!(flags & I915_WAIT_LOCKED));
546 #endif
547         GEM_BUG_ON(timeout < 0);
548
549         timeout = i915_gem_object_wait_reservation(obj->resv,
550                                                    flags, timeout,
551                                                    rps);
552         return timeout < 0 ? timeout : 0;
553 }
554
555 static struct intel_rps_client *to_rps_client(struct drm_file *file)
556 {
557         struct drm_i915_file_private *fpriv = file->driver_priv;
558
559         return &fpriv->rps;
560 }
561
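/* Switch a shmemfs-backed object over to the contiguous "phys" backing
 * store: the object must be unbound and able to drop its current pages,
 * after which i915_gem_phys_ops is installed and the pages reacquired
 * through i915_gem_object_get_pages_phys().
 */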
562 int
563 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
564                             int align)
565 {
566         int ret;
567
568         if (align > obj->base.size)
569                 return -EINVAL;
570
571         if (obj->ops == &i915_gem_phys_ops)
572                 return 0;
573
574         if (obj->mm.madv != I915_MADV_WILLNEED)
575                 return -EFAULT;
576
577         if (obj->base.filp == NULL)
578                 return -EINVAL;
579
580         ret = i915_gem_object_unbind(obj);
581         if (ret)
582                 return ret;
583
584         __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
585         if (obj->mm.pages)
586                 return -EBUSY;
587
588         obj->ops = &i915_gem_phys_ops;
589
590         return i915_gem_object_pin_pages(obj);
591 }
592
593 static int
594 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
595                      struct drm_i915_gem_pwrite *args,
596                      struct drm_file *file)
597 {
598         struct drm_device *dev = obj->base.dev;
599         void *vaddr = obj->phys_handle->vaddr + args->offset;
600         char __user *user_data = u64_to_user_ptr(args->data_ptr);
601         int ret;
602
603         /* We manually control the domain here and pretend that it
604          * remains coherent i.e. in the GTT domain, like shmem_pwrite.
605          */
606         lockdep_assert_held(&obj->base.dev->struct_mutex);
607         ret = i915_gem_object_wait(obj,
608                                    I915_WAIT_INTERRUPTIBLE |
609                                    I915_WAIT_LOCKED |
610                                    I915_WAIT_ALL,
611                                    MAX_SCHEDULE_TIMEOUT,
612                                    to_rps_client(file));
613         if (ret)
614                 return ret;
615
616         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
617         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
618                 unsigned long unwritten;
619
620                 /* The physical object once assigned is fixed for the lifetime
621                  * of the obj, so we can safely drop the lock and continue
622                  * to access vaddr.
623                  */
624                 mutex_unlock(&dev->struct_mutex);
625                 unwritten = copy_from_user(vaddr, user_data, args->size);
626                 mutex_lock(&dev->struct_mutex);
627                 if (unwritten) {
628                         ret = -EFAULT;
629                         goto out;
630                 }
631         }
632
633         drm_clflush_virt_range(vaddr, args->size);
634         i915_gem_chipset_flush(to_i915(dev));
635
636 out:
637         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
638         return ret;
639 }
640
641 void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
642 {
643         return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
644 }
645
646 void i915_gem_object_free(struct drm_i915_gem_object *obj)
647 {
648         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
649         kmem_cache_free(dev_priv->objects, obj);
650 }
651
652 static int
653 i915_gem_create(struct drm_file *file,
654                 struct drm_i915_private *dev_priv,
655                 uint64_t size,
656                 uint32_t *handle_p)
657 {
658         struct drm_i915_gem_object *obj;
659         int ret;
660         u32 handle;
661
662         size = roundup(size, PAGE_SIZE);
663         if (size == 0)
664                 return -EINVAL;
665
666         /* Allocate the new object */
667         obj = i915_gem_object_create(dev_priv, size);
668         if (IS_ERR(obj))
669                 return PTR_ERR(obj);
670
671         ret = drm_gem_handle_create(file, &obj->base, &handle);
672         /* drop reference from allocate - handle holds it now */
673         i915_gem_object_put(obj);
674         if (ret)
675                 return ret;
676
677         *handle_p = handle;
678         return 0;
679 }
680
681 int
682 i915_gem_dumb_create(struct drm_file *file,
683                      struct drm_device *dev,
684                      struct drm_mode_create_dumb *args)
685 {
686         /* have to work out size/pitch and return them */
687         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
688         args->size = args->pitch * args->height;
689         return i915_gem_create(file, to_i915(dev),
690                                args->size, &args->handle);
691 }
692
693 /**
694  * Creates a new mm object and returns a handle to it.
695  * @dev: drm device pointer
696  * @data: ioctl data blob
697  * @file: drm file pointer
698  */
699 int
700 i915_gem_create_ioctl(struct drm_device *dev, void *data,
701                       struct drm_file *file)
702 {
703         struct drm_i915_private *dev_priv = to_i915(dev);
704         struct drm_i915_gem_create *args = data;
705
706         i915_gem_flush_free_objects(dev_priv);
707
708         return i915_gem_create(file, dev_priv,
709                                args->size, &args->handle);
710 }
711
712 static inline int
713 __copy_to_user_swizzled(char __user *cpu_vaddr,
714                         const char *gpu_vaddr, int gpu_offset,
715                         int length)
716 {
717         int ret, cpu_offset = 0;
718
719         while (length > 0) {
720                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
721                 int this_length = min(cacheline_end - gpu_offset, length);
722                 int swizzled_gpu_offset = gpu_offset ^ 64;
723
724                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
725                                      gpu_vaddr + swizzled_gpu_offset,
726                                      this_length);
727                 if (ret)
728                         return ret + length;
729
730                 cpu_offset += this_length;
731                 gpu_offset += this_length;
732                 length -= this_length;
733         }
734
735         return 0;
736 }
737
738 static inline int
739 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
740                           const char __user *cpu_vaddr,
741                           int length)
742 {
743         int ret, cpu_offset = 0;
744
745         while (length > 0) {
746                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
747                 int this_length = min(cacheline_end - gpu_offset, length);
748                 int swizzled_gpu_offset = gpu_offset ^ 64;
749
750                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
751                                        cpu_vaddr + cpu_offset,
752                                        this_length);
753                 if (ret)
754                         return ret + length;
755
756                 cpu_offset += this_length;
757                 gpu_offset += this_length;
758                 length -= this_length;
759         }
760
761         return 0;
762 }
763
764 /*
765  * Pins the specified object's pages and synchronizes the object with
766  * GPU accesses. Sets needs_clflush to non-zero if the caller should
767  * flush the object from the CPU cache.
768  */
769 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
770                                     unsigned int *needs_clflush)
771 {
772         int ret;
773
774         lockdep_assert_held(&obj->base.dev->struct_mutex);
775
776         *needs_clflush = 0;
777         if (!i915_gem_object_has_struct_page(obj))
778                 return -ENODEV;
779
780         ret = i915_gem_object_wait(obj,
781                                    I915_WAIT_INTERRUPTIBLE |
782                                    I915_WAIT_LOCKED,
783                                    MAX_SCHEDULE_TIMEOUT,
784                                    NULL);
785         if (ret)
786                 return ret;
787
788         ret = i915_gem_object_pin_pages(obj);
789         if (ret)
790                 return ret;
791
792         i915_gem_object_flush_gtt_write_domain(obj);
793
794         /* If we're not in the cpu read domain, set ourselves into the gtt
795          * read domain and manually flush cachelines (if required). This
796          * optimizes for the case when the gpu will dirty the data
797          * again anyway before the next pread happens.
798          */
799         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
800                 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
801                                                         obj->cache_level);
802
803         if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
804                 ret = i915_gem_object_set_to_cpu_domain(obj, false);
805                 if (ret)
806                         goto err_unpin;
807
808                 *needs_clflush = 0;
809         }
810
811         /* return with the pages pinned */
812         return 0;
813
814 err_unpin:
815         i915_gem_object_unpin_pages(obj);
816         return ret;
817 }
818
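/* As above, but for CPU writes: on success the pages are pinned and
 * *needs_clflush is a mask of CLFLUSH_BEFORE (invalidate stale cachelines
 * before a partial write) and CLFLUSH_AFTER (flush the written cachelines
 * back for the GPU).
 */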
819 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
820                                      unsigned int *needs_clflush)
821 {
822         int ret;
823
824         lockdep_assert_held(&obj->base.dev->struct_mutex);
825
826         *needs_clflush = 0;
827         if (!i915_gem_object_has_struct_page(obj))
828                 return -ENODEV;
829
830         ret = i915_gem_object_wait(obj,
831                                    I915_WAIT_INTERRUPTIBLE |
832                                    I915_WAIT_LOCKED |
833                                    I915_WAIT_ALL,
834                                    MAX_SCHEDULE_TIMEOUT,
835                                    NULL);
836         if (ret)
837                 return ret;
838
839         ret = i915_gem_object_pin_pages(obj);
840         if (ret)
841                 return ret;
842
843         i915_gem_object_flush_gtt_write_domain(obj);
844
845         /* If we're not in the cpu write domain, set ourselves into the
846          * gtt write domain and manually flush cachelines (as required).
847          * This optimizes for the case when the gpu will use the data
848          * right away and we therefore have to clflush anyway.
849          */
850         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
851                 *needs_clflush |= cpu_write_needs_clflush(obj) << 1;
852
853         /* The same trick applies to invalidating partially written
854          * cachelines read before writing.
855          */
856         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
857                 *needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
858                                                          obj->cache_level);
859
860         if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
861                 ret = i915_gem_object_set_to_cpu_domain(obj, true);
862                 if (ret)
863                         goto err_unpin;
864
865                 *needs_clflush = 0;
866         }
867
868         if ((*needs_clflush & CLFLUSH_AFTER) == 0)
869                 obj->cache_dirty = true;
870
871         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
872         obj->mm.dirty = true;
873         /* return with the pages pinned */
874         return 0;
875
876 err_unpin:
877         i915_gem_object_unpin_pages(obj);
878         return ret;
879 }
880
881 static void
882 shmem_clflush_swizzled_range(char *addr, unsigned long length,
883                              bool swizzled)
884 {
885         if (unlikely(swizzled)) {
886                 unsigned long start = (unsigned long) addr;
887                 unsigned long end = (unsigned long) addr + length;
888
889                 /* For swizzling simply ensure that we always flush both
890                  * channels. Lame, but simple and it works. Swizzled
891                  * pwrite/pread is far from a hotpath - current userspace
892                  * doesn't use it at all. */
893                 start = round_down(start, 128);
894                 end = round_up(end, 128);
895
896                 drm_clflush_virt_range((void *)start, end - start);
897         } else {
898                 drm_clflush_virt_range(addr, length);
899         }
900
901 }
902
903 /* The only difference from the fast-path function is that this can handle
904  * bit17 swizzling and uses non-atomic copy and kmap functions. */
905 static int
906 shmem_pread_slow(struct page *page, int offset, int length,
907                  char __user *user_data,
908                  bool page_do_bit17_swizzling, bool needs_clflush)
909 {
910         char *vaddr;
911         int ret;
912
913         vaddr = kmap(page);
914         if (needs_clflush)
915                 shmem_clflush_swizzled_range(vaddr + offset, length,
916                                              page_do_bit17_swizzling);
917
918         if (page_do_bit17_swizzling)
919                 ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
920         else
921                 ret = __copy_to_user(user_data, vaddr + offset, length);
922         kunmap(page);
923
924         return ret ? -EFAULT : 0;
925 }
926
927 static int
928 shmem_pread(struct page *page, int offset, int length, char __user *user_data,
929             bool page_do_bit17_swizzling, bool needs_clflush)
930 {
931         int ret;
932
933         ret = -ENODEV;
934         if (!page_do_bit17_swizzling) {
935                 char *vaddr = kmap_atomic(page);
936
937                 if (needs_clflush)
938                         drm_clflush_virt_range(vaddr + offset, length);
939                 ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
940                 kunmap_atomic(vaddr);
941         }
942         if (ret == 0)
943                 return 0;
944
945         return shmem_pread_slow(page, offset, length, user_data,
946                                 page_do_bit17_swizzling, needs_clflush);
947 }
948
949 static int
950 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
951                      struct drm_i915_gem_pread *args)
952 {
953         char __user *user_data;
954         u64 remain;
955         unsigned int obj_do_bit17_swizzling;
956         unsigned int needs_clflush;
957         unsigned int idx, offset;
958         int ret;
959
960         obj_do_bit17_swizzling = 0;
961         if (i915_gem_object_needs_bit17_swizzle(obj))
962                 obj_do_bit17_swizzling = BIT(17);
963
964         ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
965         if (ret)
966                 return ret;
967
968         ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
969         mutex_unlock(&obj->base.dev->struct_mutex);
970         if (ret)
971                 return ret;
972
973         remain = args->size;
974         user_data = u64_to_user_ptr(args->data_ptr);
975         offset = offset_in_page(args->offset);
976         for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
977                 struct page *page = i915_gem_object_get_page(obj, idx);
978                 int length;
979
980                 length = remain;
981                 if (offset + length > PAGE_SIZE)
982                         length = PAGE_SIZE - offset;
983
984                 ret = shmem_pread(page, offset, length, user_data,
985                                   page_to_phys(page) & obj_do_bit17_swizzling,
986                                   needs_clflush);
987                 if (ret)
988                         break;
989
990                 remain -= length;
991                 user_data += length;
992                 offset = 0;
993         }
994
995         i915_gem_obj_finish_shmem_access(obj);
996         return ret;
997 }
998
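/* Copy from the GGTT aperture to userspace: try the atomic WC mapping
 * first and, if the user buffer faults, retry with a non-atomic mapping
 * that can sleep. Returns non-zero if the copy could not complete.
 */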
999 static inline bool
1000 gtt_user_read(struct io_mapping *mapping,
1001               loff_t base, int offset,
1002               char __user *user_data, int length)
1003 {
1004         void *vaddr;
1005         unsigned long unwritten;
1006
1007         /* We can use the cpu mem copy function because this is X86. */
1008         vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
1009         unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
1010         io_mapping_unmap_atomic(vaddr);
1011         if (unwritten) {
1012                 vaddr = (void __force *)
1013                         io_mapping_map_wc(mapping, base, PAGE_SIZE);
1014                 unwritten = copy_to_user(user_data, vaddr + offset, length);
1015                 io_mapping_unmap(vaddr);
1016         }
1017         return unwritten;
1018 }
1019
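/* pread fallback through the GGTT aperture, used when the shmem path
 * cannot be used (e.g. no struct pages, or the atomic copy faulted): pin
 * the object into the mappable region, or insert its pages one at a time
 * into a temporary node, and read back via gtt_user_read().
 */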
1020 static int
1021 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
1022                    const struct drm_i915_gem_pread *args)
1023 {
1024         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1025         struct i915_ggtt *ggtt = &i915->ggtt;
1026         struct drm_mm_node node;
1027         struct i915_vma *vma;
1028         void __user *user_data;
1029         u64 remain, offset;
1030         int ret;
1031
1032         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1033         if (ret)
1034                 return ret;
1035
1036         intel_runtime_pm_get(i915);
1037         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1038                                        PIN_MAPPABLE | PIN_NONBLOCK);
1039         if (!IS_ERR(vma)) {
1040                 node.start = i915_ggtt_offset(vma);
1041                 node.allocated = false;
1042                 ret = i915_vma_put_fence(vma);
1043                 if (ret) {
1044                         i915_vma_unpin(vma);
1045                         vma = ERR_PTR(ret);
1046                 }
1047         }
1048         if (IS_ERR(vma)) {
1049                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1050                 if (ret)
1051                         goto out_unlock;
1052                 GEM_BUG_ON(!node.allocated);
1053         }
1054
1055         ret = i915_gem_object_set_to_gtt_domain(obj, false);
1056         if (ret)
1057                 goto out_unpin;
1058
1059         mutex_unlock(&i915->drm.struct_mutex);
1060
1061         user_data = u64_to_user_ptr(args->data_ptr);
1062         remain = args->size;
1063         offset = args->offset;
1064
1065         while (remain > 0) {
1066                 /* Operation in this page
1067                  *
1068                  * page_base = page offset within aperture
1069                  * page_offset = offset within page
1070                  * page_length = bytes to copy for this page
1071                  */
1072                 u32 page_base = node.start;
1073                 unsigned page_offset = offset_in_page(offset);
1074                 unsigned page_length = PAGE_SIZE - page_offset;
1075                 page_length = remain < page_length ? remain : page_length;
1076                 if (node.allocated) {
1077                         wmb();
1078                         ggtt->base.insert_page(&ggtt->base,
1079                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1080                                                node.start, I915_CACHE_NONE, 0);
1081                         wmb();
1082                 } else {
1083                         page_base += offset & PAGE_MASK;
1084                 }
1085
1086                 if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
1087                                   user_data, page_length)) {
1088                         ret = -EFAULT;
1089                         break;
1090                 }
1091
1092                 remain -= page_length;
1093                 user_data += page_length;
1094                 offset += page_length;
1095         }
1096
1097         mutex_lock(&i915->drm.struct_mutex);
1098 out_unpin:
1099         if (node.allocated) {
1100                 wmb();
1101                 ggtt->base.clear_range(&ggtt->base,
1102                                        node.start, node.size);
1103                 remove_mappable_node(&node);
1104         } else {
1105                 i915_vma_unpin(vma);
1106         }
1107 out_unlock:
1108         intel_runtime_pm_put(i915);
1109         mutex_unlock(&i915->drm.struct_mutex);
1110
1111         return ret;
1112 }
1113
1114 /**
1115  * Reads data from the object referenced by handle.
1116  * @dev: drm device pointer
1117  * @data: ioctl data blob
1118  * @file: drm file pointer
1119  *
1120  * On error, the contents of *data are undefined.
1121  */
1122 int
1123 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1124                      struct drm_file *file)
1125 {
1126         struct drm_i915_gem_pread *args = data;
1127         struct drm_i915_gem_object *obj;
1128         int ret;
1129
1130         if (args->size == 0)
1131                 return 0;
1132
1133         if (!access_ok(VERIFY_WRITE,
1134                        u64_to_user_ptr(args->data_ptr),
1135                        args->size))
1136                 return -EFAULT;
1137
1138         obj = i915_gem_object_lookup(file, args->handle);
1139         if (!obj)
1140                 return -ENOENT;
1141
1142         /* Bounds check source.  */
1143         if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
1144                 ret = -EINVAL;
1145                 goto out;
1146         }
1147
1148         trace_i915_gem_object_pread(obj, args->offset, args->size);
1149
1150         ret = i915_gem_object_wait(obj,
1151                                    I915_WAIT_INTERRUPTIBLE,
1152                                    MAX_SCHEDULE_TIMEOUT,
1153                                    to_rps_client(file));
1154         if (ret)
1155                 goto out;
1156
1157         ret = i915_gem_object_pin_pages(obj);
1158         if (ret)
1159                 goto out;
1160
1161         ret = i915_gem_shmem_pread(obj, args);
1162         if (ret == -EFAULT || ret == -ENODEV)
1163                 ret = i915_gem_gtt_pread(obj, args);
1164
1165         i915_gem_object_unpin_pages(obj);
1166 out:
1167         i915_gem_object_put(obj);
1168         return ret;
1169 }
1170
1171 /* This is the fast write path, which cannot handle
1172  * page faults in the source data.
1173  */
1174
1175 static inline bool
1176 ggtt_write(struct io_mapping *mapping,
1177            loff_t base, int offset,
1178            char __user *user_data, int length)
1179 {
1180         void *vaddr;
1181         unsigned long unwritten;
1182
1183         /* We can use the cpu mem copy function because this is X86. */
1184         vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
1185         unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
1186                                                       user_data, length);
1187         io_mapping_unmap_atomic(vaddr);
1188         if (unwritten) {
1189                 vaddr = (void __force *)
1190                         io_mapping_map_wc(mapping, base, PAGE_SIZE);
1191                 unwritten = copy_from_user(vaddr + offset, user_data, length);
1192                 io_mapping_unmap(vaddr);
1193         }
1194
1195         return unwritten;
1196 }
1197
1198 /**
1199  * This is the fast pwrite path, where we copy the data directly from the
1200  * user into the GTT, uncached.
1201  * @obj: i915 GEM object
1202  * @args: pwrite arguments structure
1203  */
1204 static int
1205 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
1206                          const struct drm_i915_gem_pwrite *args)
1207 {
1208         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1209         struct i915_ggtt *ggtt = &i915->ggtt;
1210         struct drm_mm_node node;
1211         struct i915_vma *vma;
1212         u64 remain, offset;
1213         void __user *user_data;
1214         int ret;
1215
1216         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1217         if (ret)
1218                 return ret;
1219
1220         intel_runtime_pm_get(i915);
1221         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1222                                        PIN_MAPPABLE | PIN_NONBLOCK);
1223         if (!IS_ERR(vma)) {
1224                 node.start = i915_ggtt_offset(vma);
1225                 node.allocated = false;
1226                 ret = i915_vma_put_fence(vma);
1227                 if (ret) {
1228                         i915_vma_unpin(vma);
1229                         vma = ERR_PTR(ret);
1230                 }
1231         }
1232         if (IS_ERR(vma)) {
1233                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1234                 if (ret)
1235                         goto out_unlock;
1236                 GEM_BUG_ON(!node.allocated);
1237         }
1238
1239         ret = i915_gem_object_set_to_gtt_domain(obj, true);
1240         if (ret)
1241                 goto out_unpin;
1242
1243         mutex_unlock(&i915->drm.struct_mutex);
1244
1245         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1246
1247         user_data = u64_to_user_ptr(args->data_ptr);
1248         offset = args->offset;
1249         remain = args->size;
1250         while (remain) {
1251                 /* Operation in this page
1252                  *
1253                  * page_base = page offset within aperture
1254                  * page_offset = offset within page
1255                  * page_length = bytes to copy for this page
1256                  */
1257                 u32 page_base = node.start;
1258                 unsigned int page_offset = offset_in_page(offset);
1259                 unsigned int page_length = PAGE_SIZE - page_offset;
1260                 page_length = remain < page_length ? remain : page_length;
1261                 if (node.allocated) {
1262                         wmb(); /* flush the write before we modify the GGTT */
1263                         ggtt->base.insert_page(&ggtt->base,
1264                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1265                                                node.start, I915_CACHE_NONE, 0);
1266                         wmb(); /* flush modifications to the GGTT (insert_page) */
1267                 } else {
1268                         page_base += offset & PAGE_MASK;
1269                 }
1270                 /* If we get a fault while copying data, then (presumably) our
1271                  * source page isn't available.  Return the error and we'll
1272                  * retry in the slow path.
1273                  * If the object is non-shmem backed, we retry with the
1274                  * path that handles page faults.
1275                  */
1276                 if (ggtt_write(&ggtt->mappable, page_base, page_offset,
1277                                user_data, page_length)) {
1278                         ret = -EFAULT;
1279                         break;
1280                 }
1281
1282                 remain -= page_length;
1283                 user_data += page_length;
1284                 offset += page_length;
1285         }
1286         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1287
1288         mutex_lock(&i915->drm.struct_mutex);
1289 out_unpin:
1290         if (node.allocated) {
1291                 wmb();
1292                 ggtt->base.clear_range(&ggtt->base,
1293                                        node.start, node.size);
1294                 remove_mappable_node(&node);
1295         } else {
1296                 i915_vma_unpin(vma);
1297         }
1298 out_unlock:
1299         intel_runtime_pm_put(i915);
1300         mutex_unlock(&i915->drm.struct_mutex);
1301         return ret;
1302 }
1303
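/* Slow per-page shmem pwrite: handles bit17 swizzling and uses the
 * non-atomic kmap/copy helpers, mirroring shmem_pread_slow() above.
 */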
1304 static int
1305 shmem_pwrite_slow(struct page *page, int offset, int length,
1306                   char __user *user_data,
1307                   bool page_do_bit17_swizzling,
1308                   bool needs_clflush_before,
1309                   bool needs_clflush_after)
1310 {
1311         char *vaddr;
1312         int ret;
1313
1314         vaddr = kmap(page);
1315         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1316                 shmem_clflush_swizzled_range(vaddr + offset, length,
1317                                              page_do_bit17_swizzling);
1318         if (page_do_bit17_swizzling)
1319                 ret = __copy_from_user_swizzled(vaddr, offset, user_data,
1320                                                 length);
1321         else
1322                 ret = __copy_from_user(vaddr + offset, user_data, length);
1323         if (needs_clflush_after)
1324                 shmem_clflush_swizzled_range(vaddr + offset, length,
1325                                              page_do_bit17_swizzling);
1326         kunmap(page);
1327
1328         return ret ? -EFAULT : 0;
1329 }
1330
1331 /* Per-page copy function for the shmem pwrite fastpath.
1332  * Flushes invalid cachelines before writing to the target if
1333  * needs_clflush_before is set and flushes out any written cachelines after
1334  * writing if needs_clflush_after is set.
1335  */
1336 static int
1337 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
1338              bool page_do_bit17_swizzling,
1339              bool needs_clflush_before,
1340              bool needs_clflush_after)
1341 {
1342         int ret;
1343
1344         ret = -ENODEV;
1345         if (!page_do_bit17_swizzling) {
1346                 char *vaddr = kmap_atomic(page);
1347
1348                 if (needs_clflush_before)
1349                         drm_clflush_virt_range(vaddr + offset, len);
1350                 ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
1351                 if (needs_clflush_after)
1352                         drm_clflush_virt_range(vaddr + offset, len);
1353
1354                 kunmap_atomic(vaddr);
1355         }
1356         if (ret == 0)
1357                 return ret;
1358
1359         return shmem_pwrite_slow(page, offset, len, user_data,
1360                                  page_do_bit17_swizzling,
1361                                  needs_clflush_before,
1362                                  needs_clflush_after);
1363 }
1364
1365 static int
1366 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
1367                       const struct drm_i915_gem_pwrite *args)
1368 {
1369         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1370         void __user *user_data;
1371         u64 remain;
1372         unsigned int obj_do_bit17_swizzling;
1373         unsigned int partial_cacheline_write;
1374         unsigned int needs_clflush;
1375         unsigned int offset, idx;
1376         int ret;
1377
1378         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1379         if (ret)
1380                 return ret;
1381
1382         ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1383         mutex_unlock(&i915->drm.struct_mutex);
1384         if (ret)
1385                 return ret;
1386
1387         obj_do_bit17_swizzling = 0;
1388         if (i915_gem_object_needs_bit17_swizzle(obj))
1389                 obj_do_bit17_swizzling = BIT(17);
1390
1391         /* If we don't overwrite a cacheline completely we need to be
1392          * careful to have up-to-date data by first clflushing. Don't
1393          * overcomplicate things and flush the entire range being written.
1394          */
1395         partial_cacheline_write = 0;
1396         if (needs_clflush & CLFLUSH_BEFORE)
1397                 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
1398
1399         user_data = u64_to_user_ptr(args->data_ptr);
1400         remain = args->size;
1401         offset = offset_in_page(args->offset);
1402         for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1403                 struct page *page = i915_gem_object_get_page(obj, idx);
1404                 int length;
1405
1406                 length = remain;
1407                 if (offset + length > PAGE_SIZE)
1408                         length = PAGE_SIZE - offset;
1409
1410                 ret = shmem_pwrite(page, offset, length, user_data,
1411                                    page_to_phys(page) & obj_do_bit17_swizzling,
1412                                    (offset | length) & partial_cacheline_write,
1413                                    needs_clflush & CLFLUSH_AFTER);
1414                 if (ret)
1415                         break;
1416
1417                 remain -= length;
1418                 user_data += length;
1419                 offset = 0;
1420         }
1421
1422         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1423         i915_gem_obj_finish_shmem_access(obj);
1424         return ret;
1425 }
1426
1427 /**
1428  * Writes data to the object referenced by handle.
1429  * @dev: drm device
1430  * @data: ioctl data blob
1431  * @file: drm file
1432  *
1433  * On error, the contents of the buffer that were to be modified are undefined.
1434  */
1435 int
1436 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1437                       struct drm_file *file)
1438 {
1439         struct drm_i915_gem_pwrite *args = data;
1440         struct drm_i915_gem_object *obj;
1441         int ret;
1442
1443         if (args->size == 0)
1444                 return 0;
1445
1446         if (!access_ok(VERIFY_READ,
1447                        u64_to_user_ptr(args->data_ptr),
1448                        args->size))
1449                 return -EFAULT;
1450
1451         obj = i915_gem_object_lookup(file, args->handle);
1452         if (!obj)
1453                 return -ENOENT;
1454
1455         /* Bounds check destination. */
1456         if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
1457                 ret = -EINVAL;
1458                 goto err;
1459         }
1460
1461         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1462
1463         ret = i915_gem_object_wait(obj,
1464                                    I915_WAIT_INTERRUPTIBLE |
1465                                    I915_WAIT_ALL,
1466                                    MAX_SCHEDULE_TIMEOUT,
1467                                    to_rps_client(file));
1468         if (ret)
1469                 goto err;
1470
1471         ret = i915_gem_object_pin_pages(obj);
1472         if (ret)
1473                 goto err;
1474
1475         ret = -EFAULT;
1476         /* We can only do the GTT pwrite on untiled buffers, as otherwise
1477          * it would end up going through the fenced access, and we'll get
1478          * different detiling behavior between reading and writing.
1479          * pread/pwrite currently are reading and writing from the CPU
1480          * perspective, requiring manual detiling by the client.
1481          */
1482         if (!i915_gem_object_has_struct_page(obj) ||
1483             cpu_write_needs_clflush(obj))
1484                 /* Note that the gtt paths might fail with non-page-backed user
1485                  * pointers (e.g. gtt mappings when moving data between
1486                  * textures). Fall back to the shmem path in that case.
1487                  */
1488                 ret = i915_gem_gtt_pwrite_fast(obj, args);
1489
1490         if (ret == -EFAULT || ret == -ENOSPC) {
1491                 if (obj->phys_handle)
1492                         ret = i915_gem_phys_pwrite(obj, args, file);
1493                 else
1494                         ret = i915_gem_shmem_pwrite(obj, args);
1495         }
1496
1497         i915_gem_object_unpin_pages(obj);
1498 err:
1499         i915_gem_object_put(obj);
1500         return ret;
1501 }
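
/*
 * Illustrative sketch (not part of this file): how userspace might drive the
 * pwrite ioctl above. It assumes libdrm's drmIoctl() wrapper and the i915
 * uAPI header; "example_gem_pwrite" and its arguments are hypothetical names,
 * and the handle is assumed to come from an earlier GEM create ioctl.
 */
#if 0	/* userspace example, excluded from the kernel build */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int example_gem_pwrite(int drm_fd, uint32_t handle, uint64_t offset,
			      const void *src, uint64_t len)
{
	struct drm_i915_gem_pwrite pwrite;

	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = handle;
	pwrite.offset = offset;	/* byte offset into the object */
	pwrite.size = len;
	pwrite.data_ptr = (uintptr_t)src;

	/* drmIoctl() retries on EINTR; 0 on success, -1 + errno otherwise */
	return drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}
#endif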
1502
1503 static inline enum fb_op_origin
1504 write_origin(struct drm_i915_gem_object *obj, unsigned domain)
1505 {
1506         return (domain == I915_GEM_DOMAIN_GTT ?
1507                 obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
1508 }
1509
1510 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
1511 {
1512         struct drm_i915_private *i915;
1513         struct list_head *list;
1514         struct i915_vma *vma;
1515
1516         list_for_each_entry(vma, &obj->vma_list, obj_link) {
1517                 if (!i915_vma_is_ggtt(vma))
1518                         continue;
1519
1520                 if (i915_vma_is_active(vma))
1521                         continue;
1522
1523                 if (!drm_mm_node_allocated(&vma->node))
1524                         continue;
1525
1526                 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
1527         }
1528
1529         i915 = to_i915(obj->base.dev);
1530         list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
1531         list_move_tail(&obj->global_link, list);
1532 }
1533
1534 /**
1535  * i915_gem_set_domain_ioctl - Called when user space prepares to use an
1536  * object with the CPU, either through the mmap ioctl's mapping or a GTT mapping.
1537  * @dev: drm device
1538  * @data: ioctl data blob
1539  * @file: drm file
1540  */
1541 int
1542 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1543                           struct drm_file *file)
1544 {
1545         struct drm_i915_gem_set_domain *args = data;
1546         struct drm_i915_gem_object *obj;
1547         uint32_t read_domains = args->read_domains;
1548         uint32_t write_domain = args->write_domain;
1549         int err;
1550
1551         /* Only handle setting domains to types used by the CPU. */
1552         if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1553                 return -EINVAL;
1554
1555         /* Having something in the write domain implies it's in the read
1556          * domain, and only that read domain.  Enforce that in the request.
1557          */
1558         if (write_domain != 0 && read_domains != write_domain)
1559                 return -EINVAL;
1560
1561         obj = i915_gem_object_lookup(file, args->handle);
1562         if (!obj)
1563                 return -ENOENT;
1564
1565         /* Try to flush the object off the GPU without holding the lock.
1566          * We will repeat the flush holding the lock in the normal manner
1567          * to catch cases where we are gazumped.
1568          */
1569         err = i915_gem_object_wait(obj,
1570                                    I915_WAIT_INTERRUPTIBLE |
1571                                    (write_domain ? I915_WAIT_ALL : 0),
1572                                    MAX_SCHEDULE_TIMEOUT,
1573                                    to_rps_client(file));
1574         if (err)
1575                 goto out;
1576
1577         /* Flush and acquire obj->pages so that we are coherent through
1578          * direct access in memory with previous cached writes through
1579          * shmemfs and that our cache domain tracking remains valid.
1580          * For example, if the obj->filp was moved to swap without us
1581          * being notified and releasing the pages, we would mistakenly
1582          * continue to assume that the obj remained out of the CPU cached
1583          * domain.
1584          */
1585         err = i915_gem_object_pin_pages(obj);
1586         if (err)
1587                 goto out;
1588
1589         err = i915_mutex_lock_interruptible(dev);
1590         if (err)
1591                 goto out_unpin;
1592
1593         if (read_domains & I915_GEM_DOMAIN_GTT)
1594                 err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1595         else
1596                 err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1597
1598         /* And bump the LRU for this access */
1599         i915_gem_object_bump_inactive_ggtt(obj);
1600
1601         mutex_unlock(&dev->struct_mutex);
1602
1603         if (write_domain != 0)
1604                 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
1605
1606 out_unpin:
1607         i915_gem_object_unpin_pages(obj);
1608 out:
1609         i915_gem_object_put(obj);
1610         return err;
1611 }
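
/*
 * Illustrative sketch (not part of this file): how userspace typically brackets
 * CPU access with the set_domain ioctl above (with sw_finish afterwards for
 * scanout buffers). Assumes libdrm's drmIoctl() and the i915 uAPI header;
 * "example_prep_cpu_write" is a hypothetical helper name.
 */
#if 0	/* userspace example, excluded from the kernel build */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int example_prep_cpu_write(int drm_fd, uint32_t handle)
{
	struct drm_i915_gem_set_domain sd;

	memset(&sd, 0, sizeof(sd));
	sd.handle = handle;
	/* a write domain implies the same read domain, and only that one */
	sd.read_domains = I915_GEM_DOMAIN_CPU;
	sd.write_domain = I915_GEM_DOMAIN_CPU;

	return drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}
#endif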
1612
1613 /**
1614  * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
1615  * @dev: drm device
1616  * @data: ioctl data blob
1617  * @file: drm file
1618  */
1619 int
1620 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1621                          struct drm_file *file)
1622 {
1623         struct drm_i915_gem_sw_finish *args = data;
1624         struct drm_i915_gem_object *obj;
1625         int err = 0;
1626
1627         obj = i915_gem_object_lookup(file, args->handle);
1628         if (!obj)
1629                 return -ENOENT;
1630
1631         /* Pinned buffers may be scanout, so flush the cache */
1632         if (READ_ONCE(obj->pin_display)) {
1633                 err = i915_mutex_lock_interruptible(dev);
1634                 if (!err) {
1635                         i915_gem_object_flush_cpu_write_domain(obj);
1636                         mutex_unlock(&dev->struct_mutex);
1637                 }
1638         }
1639
1640         i915_gem_object_put(obj);
1641         return err;
1642 }
1643
1644 /**
1645  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1646  *                       it is mapped to.
1647  * @dev: drm device
1648  * @data: ioctl data blob
1649  * @file: drm file
1650  *
1651  * While the mapping holds a reference on the contents of the object, it doesn't
1652  * imply a ref on the object itself.
1653  *
1654  * IMPORTANT:
1655  *
1656  * DRM driver writers who look at this function as an example of how to do GEM
1657  * mmap support: please don't implement mmap support like this. The modern way
1658  * to implement DRM mmap support is with an mmap offset ioctl (like
1659  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1660  * That way debug tooling like valgrind will understand what's going on; hiding
1661  * the mmap call in a driver-private ioctl will break that. The i915 driver only
1662  * does cpu mmaps this way because we didn't know better.
1663  */
1664 int
1665 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1666                     struct drm_file *file)
1667 {
1668         struct drm_i915_gem_mmap *args = data;
1669         struct drm_i915_gem_object *obj;
1670         unsigned long addr;
1671
1672         if (args->flags & ~(I915_MMAP_WC))
1673                 return -EINVAL;
1674
1675         if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1676                 return -ENODEV;
1677
1678         obj = i915_gem_object_lookup(file, args->handle);
1679         if (!obj)
1680                 return -ENOENT;
1681
1682         /* prime objects have no backing filp to GEM mmap
1683          * pages from.
1684          */
1685         if (!obj->base.filp) {
1686                 i915_gem_object_put(obj);
1687                 return -EINVAL;
1688         }
1689
1690         addr = vm_mmap(obj->base.filp, 0, args->size,
1691                        PROT_READ | PROT_WRITE, MAP_SHARED,
1692                        args->offset);
1693         if (args->flags & I915_MMAP_WC) {
1694                 struct mm_struct *mm = current->mm;
1695                 struct vm_area_struct *vma;
1696
1697                 if (down_write_killable(&mm->mmap_sem)) {
1698                         i915_gem_object_put(obj);
1699                         return -EINTR;
1700                 }
1701                 vma = find_vma(mm, addr);
1702                 if (vma)
1703                         vma->vm_page_prot =
1704                                 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1705                 else
1706                         addr = -ENOMEM;
1707                 up_write(&mm->mmap_sem);
1708
1709                 /* This may race, but that's ok, it only gets set */
1710                 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1711         }
1712         i915_gem_object_put(obj);
1713         if (IS_ERR((void *)addr))
1714                 return addr;
1715
1716         args->addr_ptr = (uint64_t) addr;
1717
1718         return 0;
1719 }
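
/*
 * Illustrative sketch (not part of this file): the legacy CPU mmap path the
 * comment above warns about, shown for completeness. Assumes libdrm and the
 * i915 uAPI header; "example_cpu_map" is a hypothetical helper name and error
 * handling is minimal.
 */
#if 0	/* userspace example, excluded from the kernel build */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

static void *example_cpu_map(int drm_fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.offset = 0;
	arg.size = size;
	arg.flags = 0;		/* or I915_MMAP_WC on PAT-capable CPUs */

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
		return NULL;

	/* the kernel returns the CPU address directly in addr_ptr */
	return (void *)(uintptr_t)arg.addr_ptr;
}
#endif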
1720
1721 static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
1722 {
1723         u64 size;
1724
1725         size = i915_gem_object_get_stride(obj);
1726         size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;
1727
1728         return size >> PAGE_SHIFT;
1729 }
1730
1731 /**
1732  * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
1733  *
1734  * A history of the GTT mmap interface:
1735  *
1736  * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to be
1737  *     aligned and suitable for fencing, and still fit into the available
1738  *     mappable space left by the pinned display objects. A classic problem
1739  *     we called the page-fault-of-doom where we would ping-pong between
1740  *     two objects that could not fit inside the GTT and so the memcpy
1741  *     would page one object in at the expense of the other between every
1742  *     single byte.
1743  *
1744  * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
1745  *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
1746  *     object is too large for the available space (or simply too large
1747  *     for the mappable aperture!), a view is created instead and faulted
1748  *     into userspace. (This view is aligned and sized appropriately for
1749  *     fenced access.)
1750  *
1751  * Restrictions:
1752  *
1753  *  * snoopable objects cannot be accessed via the GTT. Doing so can cause
1754  *    machine hangs on some architectures, corruption on others. An attempt to
1755  *    service a GTT page fault from a snoopable object will generate a SIGBUS.
1756  *
1757  *  * the object must be able to fit into RAM (physical memory, though not
1758  *    limited to the mappable aperture).
1759  *
1760  *
1761  * Caveats:
1762  *
1763  *  * a new GTT page fault will synchronize rendering from the GPU and flush
1764  *    all data to system memory. Subsequent access will not be synchronized.
1765  *
1766  *  * all mappings are revoked on runtime device suspend.
1767  *
1768  *  * there are only 8, 16 or 32 fence registers to share between all users
1769  *    (older machines require a fence register for display and blitter access
1770  *    as well). Contention of the fence registers will cause the previous users
1771  *    to be unmapped and any new access will generate new page faults.
1772  *
1773  *  * running out of memory while servicing a fault may generate a SIGBUS,
1774  *    rather than the expected SIGSEGV.
1775  */
1776 int i915_gem_mmap_gtt_version(void)
1777 {
1778         return 1;
1779 }
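
/*
 * Illustrative sketch (not part of this file): querying the value returned by
 * i915_gem_mmap_gtt_version() from userspace via GETPARAM. Assumes libdrm and
 * the i915 uAPI header; a failing ioctl here is treated as "version 0".
 */
#if 0	/* userspace example, excluded from the kernel build */
#include <xf86drm.h>
#include <i915_drm.h>

static int example_gtt_mmap_version(int drm_fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	gp.param = I915_PARAM_MMAP_GTT_VERSION;
	gp.value = &value;

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;	/* old kernels: assume the original behaviour */

	return value;
}
#endif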
1780
1781 /**
1782  * i915_gem_fault - fault a page into the GTT
1783  * @area: CPU VMA in question
1784  * @vmf: fault info
1785  *
1786  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1787  * from userspace.  The fault handler takes care of binding the object to
1788  * the GTT (if needed), allocating and programming a fence register (again,
1789  * only if needed based on whether the old reg is still valid or the object
1790  * is tiled) and inserting a new PTE into the faulting process.
1791  *
1792  * Note that the faulting process may involve evicting existing objects
1793  * from the GTT and/or fence registers to make room.  So performance may
1794  * suffer if the GTT working set is large or there are few fence registers
1795  * left.
1796  *
1797  * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1798  * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
1799  */
1800 int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
1801 {
1802 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
1803         struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
1804         struct drm_device *dev = obj->base.dev;
1805         struct drm_i915_private *dev_priv = to_i915(dev);
1806         struct i915_ggtt *ggtt = &dev_priv->ggtt;
1807         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1808         struct i915_vma *vma;
1809         pgoff_t page_offset;
1810         unsigned int flags;
1811         int ret;
1812
1813         /* We don't use vmf->pgoff since that has the fake offset */
1814         page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
1815                 PAGE_SHIFT;
1816
1817         trace_i915_gem_object_fault(obj, page_offset, true, write);
1818
1819         /* Try to flush the object off the GPU first without holding the lock.
1820          * Upon acquiring the lock, we will perform our sanity checks and then
1821          * repeat the flush holding the lock in the normal manner to catch cases
1822          * where we are gazumped.
1823          */
1824         ret = i915_gem_object_wait(obj,
1825                                    I915_WAIT_INTERRUPTIBLE,
1826                                    MAX_SCHEDULE_TIMEOUT,
1827                                    NULL);
1828         if (ret)
1829                 goto err;
1830
1831         ret = i915_gem_object_pin_pages(obj);
1832         if (ret)
1833                 goto err;
1834
1835         intel_runtime_pm_get(dev_priv);
1836
1837         ret = i915_mutex_lock_interruptible(dev);
1838         if (ret)
1839                 goto err_rpm;
1840
1841         /* Access to snoopable pages through the GTT is incoherent. */
1842         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
1843                 ret = -EFAULT;
1844                 goto err_unlock;
1845         }
1846
1847         /* If the object is smaller than a couple of partial vmas, it is
1848          * not worth only creating a single partial vma - we may as well
1849          * clear enough space for the full object.
1850          */
1851         flags = PIN_MAPPABLE;
1852         if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
1853                 flags |= PIN_NONBLOCK | PIN_NONFAULT;
1854
1855         /* Now pin it into the GTT as needed */
1856         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
1857         if (IS_ERR(vma)) {
1858                 struct i915_ggtt_view view;
1859                 unsigned int chunk_size;
1860
1861                 /* Use a partial view if the object is bigger than the available space */
1862                 chunk_size = MIN_CHUNK_PAGES;
1863                 if (i915_gem_object_is_tiled(obj))
1864                         chunk_size = roundup(chunk_size, tile_row_pages(obj));
1865
1866                 memset(&view, 0, sizeof(view));
1867                 view.type = I915_GGTT_VIEW_PARTIAL;
1868                 view.params.partial.offset = rounddown(page_offset, chunk_size);
1869                 view.params.partial.size =
1870                         min_t(unsigned int, chunk_size,
1871                               vma_pages(area) - view.params.partial.offset);
1872
1873                 /* If the partial covers the entire object, just create a
1874                  * normal VMA.
1875                  */
1876                 if (chunk_size >= obj->base.size >> PAGE_SHIFT)
1877                         view.type = I915_GGTT_VIEW_NORMAL;
1878
1879                 /* Userspace is now writing through an untracked VMA, abandon
1880                  * all hope that the hardware is able to track future writes.
1881                  */
1882                 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1883
1884                 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1885         }
1886         if (IS_ERR(vma)) {
1887                 ret = PTR_ERR(vma);
1888                 goto err_unlock;
1889         }
1890
1891         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1892         if (ret)
1893                 goto err_unpin;
1894
1895         ret = i915_vma_get_fence(vma);
1896         if (ret)
1897                 goto err_unpin;
1898
1899         /* Mark as being mmapped into userspace for later revocation */
1900         assert_rpm_wakelock_held(dev_priv);
1901         if (list_empty(&obj->userfault_link))
1902                 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
1903
1904         /* Finally, remap it using the new GTT offset */
1905         ret = remap_io_mapping(area,
1906                                area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
1907                                (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
1908                                min_t(u64, vma->size, area->vm_end - area->vm_start),
1909                                &ggtt->mappable);
1910
1911 err_unpin:
1912         __i915_vma_unpin(vma);
1913 err_unlock:
1914         mutex_unlock(&dev->struct_mutex);
1915 err_rpm:
1916         intel_runtime_pm_put(dev_priv);
1917         i915_gem_object_unpin_pages(obj);
1918 err:
1919         switch (ret) {
1920         case -EIO:
1921                 /*
1922                  * We eat errors when the gpu is terminally wedged to avoid
1923                  * userspace unduly crashing (gl has no provisions for mmaps to
1924                  * fail). But any other -EIO isn't ours (e.g. swap in failure)
1925                  * and so needs to be reported.
1926                  */
1927                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1928                         ret = VM_FAULT_SIGBUS;
1929                         break;
1930                 }
1931         case -EAGAIN:
1932                 /*
1933                  * EAGAIN means the gpu is hung and we'll wait for the error
1934                  * handler to reset everything when re-faulting in
1935                  * i915_mutex_lock_interruptible.
1936                  */
1937         case 0:
1938         case -ERESTARTSYS:
1939         case -EINTR:
1940         case -EBUSY:
1941                 /*
1942                  * EBUSY is ok: this just means that another thread
1943                  * already did the job.
1944                  */
1945                 ret = VM_FAULT_NOPAGE;
1946                 break;
1947         case -ENOMEM:
1948                 ret = VM_FAULT_OOM;
1949                 break;
1950         case -ENOSPC:
1951         case -EFAULT:
1952                 ret = VM_FAULT_SIGBUS;
1953                 break;
1954         default:
1955                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1956                 ret = VM_FAULT_SIGBUS;
1957                 break;
1958         }
1959         return ret;
1960 }
1961
1962 /**
1963  * i915_gem_release_mmap - remove physical page mappings
1964  * @obj: obj in question
1965  *
1966  * Preserve the reservation of the mmapping with the DRM core code, but
1967  * relinquish ownership of the pages back to the system.
1968  *
1969  * It is vital that we remove the page mapping if we have mapped a tiled
1970  * object through the GTT and then lose the fence register due to
1971  * resource pressure. Similarly if the object has been moved out of the
1972  * aperture, then pages mapped into userspace must be revoked. Removing the
1973  * mapping will then trigger a page fault on the next user access, allowing
1974  * fixup by i915_gem_fault().
1975  */
1976 void
1977 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1978 {
1979         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1980
1981         /* Serialisation between user GTT access and our code depends upon
1982          * revoking the CPU's PTE whilst the mutex is held. The next user
1983          * pagefault then has to wait until we release the mutex.
1984          *
1985          * Note that RPM complicates somewhat by adding an additional
1986          * requirement that operations to the GGTT be made holding the RPM
1987          * wakeref.
1988          */
1989         lockdep_assert_held(&i915->drm.struct_mutex);
1990         intel_runtime_pm_get(i915);
1991
1992         if (list_empty(&obj->userfault_link))
1993                 goto out;
1994
1995         list_del_init(&obj->userfault_link);
1996         drm_vma_node_unmap(&obj->base.vma_node,
1997                            obj->base.dev->anon_inode->i_mapping);
1998
1999         /* Ensure that the CPU's PTEs are revoked and there are no outstanding
2000          * memory transactions from userspace before we return. The TLB
2001          * flushing implied by changing the PTE above *should* be
2002          * sufficient, an extra barrier here just provides us with a bit
2003          * of paranoid documentation about our requirement to serialise
2004          * memory writes before touching registers / GSM.
2005          */
2006         wmb();
2007
2008 out:
2009         intel_runtime_pm_put(i915);
2010 }
2011
2012 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
2013 {
2014         struct drm_i915_gem_object *obj, *on;
2015         int i;
2016
2017         /*
2018          * Only called during RPM suspend. All users of the userfault_list
2019          * must be holding an RPM wakeref to ensure that this can not
2020          * run concurrently with themselves (and use the struct_mutex for
2021          * protection between themselves).
2022          */
2023
2024         list_for_each_entry_safe(obj, on,
2025                                  &dev_priv->mm.userfault_list, userfault_link) {
2026                 list_del_init(&obj->userfault_link);
2027                 drm_vma_node_unmap(&obj->base.vma_node,
2028                                    obj->base.dev->anon_inode->i_mapping);
2029         }
2030
2031         /* The fences will be lost when the device powers down. If any were
2032          * in use by hardware (i.e. they are pinned), we should not be powering
2033          * down! All other fences will be reacquired by the user upon waking.
2034          */
2035         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2036                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2037
2038                 if (WARN_ON(reg->pin_count))
2039                         continue;
2040
2041                 if (!reg->vma)
2042                         continue;
2043
2044                 GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
2045                 reg->dirty = true;
2046         }
2047 }
2048
2049 /**
2050  * i915_gem_get_ggtt_size - return required global GTT size for an object
2051  * @dev_priv: i915 device
2052  * @size: object size
2053  * @tiling_mode: tiling mode
2054  *
2055  * Return the required global GTT size for an object, taking into account
2056  * potential fence register mapping.
2057  */
2058 u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
2059                            u64 size, int tiling_mode)
2060 {
2061         u64 ggtt_size;
2062
2063         GEM_BUG_ON(size == 0);
2064
2065         if (INTEL_GEN(dev_priv) >= 4 ||
2066             tiling_mode == I915_TILING_NONE)
2067                 return size;
2068
2069         /* Previous chips need a power-of-two fence region when tiling */
2070         if (IS_GEN3(dev_priv))
2071                 ggtt_size = 1024*1024;
2072         else
2073                 ggtt_size = 512*1024;
2074
2075         while (ggtt_size < size)
2076                 ggtt_size <<= 1;
2077
2078         return ggtt_size;
2079 }
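
/*
 * Worked example for the function above (the numbers are illustrative): for a
 * 600 KiB tiled object, gen3 starts from its 1 MiB minimum, which already
 * covers 600 KiB, so 1 MiB is returned; gen2 starts at 512 KiB and doubles
 * once to reach 1 MiB. On gen4+ (or for untiled objects) the object size is
 * returned unchanged.
 */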
2080
2081 /**
2082  * i915_gem_get_ggtt_alignment - return required global GTT alignment
2083  * @dev_priv: i915 device
2084  * @size: object size
2085  * @tiling_mode: tiling mode
2086  * @fenced: is fenced alignment required or not
2087  *
2088  * Return the required global GTT alignment for an object, taking into account
2089  * potential fence register mapping.
2090  */
2091 u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
2092                                 int tiling_mode, bool fenced)
2093 {
2094         GEM_BUG_ON(size == 0);
2095
2096         /*
2097          * Minimum alignment is 4k (GTT page size), but might be greater
2098          * if a fence register is needed for the object.
2099          */
2100         if (INTEL_GEN(dev_priv) >= 4 ||
2101             (!fenced && (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))) ||
2102             tiling_mode == I915_TILING_NONE)
2103                 return 4096;
2104
2105         /*
2106          * Previous chips need to be aligned to the size of the smallest
2107          * fence register that can contain the object.
2108          */
2109         return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
2110 }
2111
2112 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2113 {
2114         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2115         int err;
2116
2117         err = drm_gem_create_mmap_offset(&obj->base);
2118         if (!err)
2119                 return 0;
2120
2121         /* We can idle the GPU locklessly to flush stale objects, but in order
2122          * to claim that space for ourselves, we need to take the big
2123          * struct_mutex to free the requests+objects and allocate our slot.
2124          */
2125         err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
2126         if (err)
2127                 return err;
2128
2129         err = i915_mutex_lock_interruptible(&dev_priv->drm);
2130         if (!err) {
2131                 i915_gem_retire_requests(dev_priv);
2132                 err = drm_gem_create_mmap_offset(&obj->base);
2133                 mutex_unlock(&dev_priv->drm.struct_mutex);
2134         }
2135
2136         return err;
2137 }
2138
2139 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2140 {
2141         drm_gem_free_mmap_offset(&obj->base);
2142 }
2143
2144 int
2145 i915_gem_mmap_gtt(struct drm_file *file,
2146                   struct drm_device *dev,
2147                   uint32_t handle,
2148                   uint64_t *offset)
2149 {
2150         struct drm_i915_gem_object *obj;
2151         int ret;
2152
2153         obj = i915_gem_object_lookup(file, handle);
2154         if (!obj)
2155                 return -ENOENT;
2156
2157         ret = i915_gem_object_create_mmap_offset(obj);
2158         if (ret == 0)
2159                 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2160
2161         i915_gem_object_put(obj);
2162         return ret;
2163 }
2164
2165 /**
2166  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2167  * @dev: DRM device
2168  * @data: GTT mapping ioctl data
2169  * @file: GEM object info
2170  *
2171  * Simply returns the fake offset to userspace so it can mmap it.
2172  * The mmap call will end up in drm_gem_mmap(), which will set things
2173  * up so we can get faults in the handler above.
2174  *
2175  * The fault handler will take care of binding the object into the GTT
2176  * (since it may have been evicted to make room for something), allocating
2177  * a fence register, and mapping the appropriate aperture address into
2178  * userspace.
2179  */
2180 int
2181 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2182                         struct drm_file *file)
2183 {
2184         struct drm_i915_gem_mmap_gtt *args = data;
2185
2186         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2187 }
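
/*
 * Illustrative sketch (not part of this file): the recommended mmap flow the
 * ioctl above enables - fetch the fake offset, then mmap() the DRM fd itself
 * so faults land in i915_gem_fault(). Assumes libdrm and the i915 uAPI header;
 * "example_gtt_map" is a hypothetical helper name.
 */
#if 0	/* userspace example, excluded from the kernel build */
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <i915_drm.h>

static void *example_gtt_map(int drm_fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap_gtt arg;
	void *ptr;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return NULL;

	/* the returned offset is only meaningful for mmap() on the DRM fd */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   drm_fd, arg.offset);
	return ptr == MAP_FAILED ? NULL : ptr;
}
#endif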
2188
2189 /* Immediately discard the backing storage */
2190 static void
2191 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2192 {
2193         i915_gem_object_free_mmap_offset(obj);
2194
2195         if (obj->base.filp == NULL)
2196                 return;
2197
2198         /* Our goal here is to return as much of the memory as possible
2199          * back to the system, as we are called from the OOM path.
2200          * To do this we must instruct the shmfs to drop all of its
2201          * backing pages, *now*.
2202          */
2203         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2204         obj->mm.madv = __I915_MADV_PURGED;
2205 }
2206
2207 /* Try to discard unwanted pages */
2208 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2209 {
2210         struct address_space *mapping;
2211
2212         lockdep_assert_held(&obj->mm.lock);
2213         GEM_BUG_ON(obj->mm.pages);
2214
2215         switch (obj->mm.madv) {
2216         case I915_MADV_DONTNEED:
2217                 i915_gem_object_truncate(obj);
2218         case __I915_MADV_PURGED:
2219                 return;
2220         }
2221
2222         if (obj->base.filp == NULL)
2223                 return;
2224
2225         mapping = obj->base.filp->f_mapping;
2226         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2227 }
2228
2229 static void
2230 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2231                               struct sg_table *pages)
2232 {
2233         struct sgt_iter sgt_iter;
2234         struct page *page;
2235
2236         __i915_gem_object_release_shmem(obj, pages);
2237
2238         i915_gem_gtt_finish_pages(obj, pages);
2239
2240         if (i915_gem_object_needs_bit17_swizzle(obj))
2241                 i915_gem_object_save_bit_17_swizzle(obj, pages);
2242
2243         for_each_sgt_page(page, sgt_iter, pages) {
2244                 if (obj->mm.dirty)
2245                         set_page_dirty(page);
2246
2247                 if (obj->mm.madv == I915_MADV_WILLNEED)
2248                         mark_page_accessed(page);
2249
2250                 put_page(page);
2251         }
2252         obj->mm.dirty = false;
2253
2254         sg_free_table(pages);
2255         kfree(pages);
2256 }
2257
2258 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
2259 {
2260         struct radix_tree_iter iter;
2261         void **slot;
2262
2263         radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
2264                 radix_tree_delete(&obj->mm.get_page.radix, iter.index);
2265 }
2266
2267 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
2268                                  enum i915_mm_subclass subclass)
2269 {
2270         struct sg_table *pages;
2271
2272         if (i915_gem_object_has_pinned_pages(obj))
2273                 return;
2274
2275         GEM_BUG_ON(obj->bind_count);
2276         if (!READ_ONCE(obj->mm.pages))
2277                 return;
2278
2279         /* May be called by shrinker from within get_pages() (on another bo) */
2280         mutex_lock_nested(&obj->mm.lock, subclass);
2281         if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
2282                 goto unlock;
2283
2284         /* ->put_pages might need to allocate memory for the bit17 swizzle
2285          * array, hence protect them from being reaped by removing them from gtt
2286          * lists early. */
2287         pages = fetch_and_zero(&obj->mm.pages);
2288         GEM_BUG_ON(!pages);
2289
2290         if (obj->mm.mapping) {
2291                 void *ptr;
2292
2293                 ptr = ptr_mask_bits(obj->mm.mapping);
2294                 if (is_vmalloc_addr(ptr))
2295                         vunmap(ptr);
2296                 else
2297                         kunmap(kmap_to_page(ptr));
2298
2299                 obj->mm.mapping = NULL;
2300         }
2301
2302         __i915_gem_object_reset_page_iter(obj);
2303
2304         obj->ops->put_pages(obj, pages);
2305 unlock:
2306         mutex_unlock(&obj->mm.lock);
2307 }
2308
2309 static unsigned int swiotlb_max_size(void)
2310 {
2311 #if IS_ENABLED(CONFIG_SWIOTLB)
2312         return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
2313 #else
2314         return 0;
2315 #endif
2316 }
2317
2318 static void i915_sg_trim(struct sg_table *orig_st)
2319 {
2320         struct sg_table new_st;
2321         struct scatterlist *sg, *new_sg;
2322         unsigned int i;
2323
2324         if (orig_st->nents == orig_st->orig_nents)
2325                 return;
2326
2327         if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
2328                 return;
2329
2330         new_sg = new_st.sgl;
2331         for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
2332                 sg_set_page(new_sg, sg_page(sg), sg->length, 0);
2333                 /* called before being DMA mapped, no need to copy sg->dma_* */
2334                 new_sg = sg_next(new_sg);
2335         }
2336
2337         sg_free_table(orig_st);
2338
2339         *orig_st = new_st;
2340 }
2341
2342 static struct sg_table *
2343 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2344 {
2345         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2346         const unsigned long page_count = obj->base.size / PAGE_SIZE;
2347         unsigned long i;
2348         struct address_space *mapping;
2349         struct sg_table *st;
2350         struct scatterlist *sg;
2351         struct sgt_iter sgt_iter;
2352         struct page *page;
2353         unsigned long last_pfn = 0;     /* suppress gcc warning */
2354         unsigned int max_segment;
2355         int ret;
2356         gfp_t gfp;
2357
2358         /* Assert that the object is not currently in any GPU domain. As it
2359          * wasn't in the GTT, there shouldn't be any way it could have been in
2360          * a GPU cache
2361          */
2362         GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2363         GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2364
2365         max_segment = swiotlb_max_size();
2366         if (!max_segment)
2367                 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
2368
2369         st = kmalloc(sizeof(*st), GFP_KERNEL);
2370         if (st == NULL)
2371                 return ERR_PTR(-ENOMEM);
2372
2373 rebuild_st:
2374         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2375                 kfree(st);
2376                 return ERR_PTR(-ENOMEM);
2377         }
2378
2379         /* Get the list of pages out of our struct file.  They'll be pinned
2380          * at this point until we release them.
2381          *
2382          * Fail silently without starting the shrinker
2383          */
2384         mapping = obj->base.filp->f_mapping;
2385         gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2386         gfp |= __GFP_NORETRY | __GFP_NOWARN;
2387         sg = st->sgl;
2388         st->nents = 0;
2389         for (i = 0; i < page_count; i++) {
2390                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2391                 if (IS_ERR(page)) {
2392                         i915_gem_shrink(dev_priv,
2393                                         page_count,
2394                                         I915_SHRINK_BOUND |
2395                                         I915_SHRINK_UNBOUND |
2396                                         I915_SHRINK_PURGEABLE);
2397                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2398                 }
2399                 if (IS_ERR(page)) {
2400                         /* We've tried hard to allocate the memory by reaping
2401                          * our own buffer, now let the real VM do its job and
2402                          * go down in flames if truly OOM.
2403                          */
2404                         page = shmem_read_mapping_page(mapping, i);
2405                         if (IS_ERR(page)) {
2406                                 ret = PTR_ERR(page);
2407                                 goto err_sg;
2408                         }
2409                 }
2410                 if (!i ||
2411                     sg->length >= max_segment ||
2412                     page_to_pfn(page) != last_pfn + 1) {
2413                         if (i)
2414                                 sg = sg_next(sg);
2415                         st->nents++;
2416                         sg_set_page(sg, page, PAGE_SIZE, 0);
2417                 } else {
2418                         sg->length += PAGE_SIZE;
2419                 }
2420                 last_pfn = page_to_pfn(page);
2421
2422                 /* Check that the i965g/gm workaround works. */
2423                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2424         }
2425         if (sg) /* loop terminated early; short sg table */
2426                 sg_mark_end(sg);
2427
2428         /* Trim unused sg entries to avoid wasting memory. */
2429         i915_sg_trim(st);
2430
2431         ret = i915_gem_gtt_prepare_pages(obj, st);
2432         if (ret) {
2433                 /* DMA remapping failed? One possible cause is that
2434                  * it could not reserve enough large entries; asking
2435                  * for PAGE_SIZE chunks instead may be helpful.
2436                  */
2437                 if (max_segment > PAGE_SIZE) {
2438                         for_each_sgt_page(page, sgt_iter, st)
2439                                 put_page(page);
2440                         sg_free_table(st);
2441
2442                         max_segment = PAGE_SIZE;
2443                         goto rebuild_st;
2444                 } else {
2445                         dev_warn(&dev_priv->drm.pdev->dev,
2446                                  "Failed to DMA remap %lu pages\n",
2447                                  page_count);
2448                         goto err_pages;
2449                 }
2450         }
2451
2452         if (i915_gem_object_needs_bit17_swizzle(obj))
2453                 i915_gem_object_do_bit_17_swizzle(obj, st);
2454
2455         return st;
2456
2457 err_sg:
2458         sg_mark_end(sg);
2459 err_pages:
2460         for_each_sgt_page(page, sgt_iter, st)
2461                 put_page(page);
2462         sg_free_table(st);
2463         kfree(st);
2464
2465         /* shmemfs first checks if there is enough memory to allocate the page
2466          * and reports ENOSPC should there be insufficient, along with the usual
2467          * ENOMEM for a genuine allocation failure.
2468          *
2469          * We use ENOSPC in our driver to mean that we have run out of aperture
2470          * space and so want to translate the error from shmemfs back to our
2471          * usual understanding of ENOMEM.
2472          */
2473         if (ret == -ENOSPC)
2474                 ret = -ENOMEM;
2475
2476         return ERR_PTR(ret);
2477 }
2478
2479 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
2480                                  struct sg_table *pages)
2481 {
2482         lockdep_assert_held(&obj->mm.lock);
2483
2484         obj->mm.get_page.sg_pos = pages->sgl;
2485         obj->mm.get_page.sg_idx = 0;
2486
2487         obj->mm.pages = pages;
2488
2489         if (i915_gem_object_is_tiled(obj) &&
2490             to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
2491                 GEM_BUG_ON(obj->mm.quirked);
2492                 __i915_gem_object_pin_pages(obj);
2493                 obj->mm.quirked = true;
2494         }
2495 }
2496
2497 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2498 {
2499         struct sg_table *pages;
2500
2501         GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2502
2503         if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
2504                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2505                 return -EFAULT;
2506         }
2507
2508         pages = obj->ops->get_pages(obj);
2509         if (unlikely(IS_ERR(pages)))
2510                 return PTR_ERR(pages);
2511
2512         __i915_gem_object_set_pages(obj, pages);
2513         return 0;
2514 }
2515
2516 /* Ensure that the associated pages are gathered from the backing storage
2517  * and pinned into our object. i915_gem_object_pin_pages() may be called
2518  * multiple times before they are released by a single call to
2519  * i915_gem_object_unpin_pages() - once the pages are no longer referenced
2520  * either as a result of memory pressure (reaping pages under the shrinker)
2521  * or as the object is itself released.
2522  */
2523 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2524 {
2525         int err;
2526
2527         err = mutex_lock_interruptible(&obj->mm.lock);
2528         if (err)
2529                 return err;
2530
2531         if (unlikely(!obj->mm.pages)) {
2532                 err = ____i915_gem_object_get_pages(obj);
2533                 if (err)
2534                         goto unlock;
2535
2536                 smp_mb__before_atomic();
2537         }
2538         atomic_inc(&obj->mm.pages_pin_count);
2539
2540 unlock:
2541         mutex_unlock(&obj->mm.lock);
2542         return err;
2543 }
2544
2545 /* The 'mapping' part of i915_gem_object_pin_map() below */
2546 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2547                                  enum i915_map_type type)
2548 {
2549         unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2550         struct sg_table *sgt = obj->mm.pages;
2551         struct sgt_iter sgt_iter;
2552         struct page *page;
2553         struct page *stack_pages[32];
2554         struct page **pages = stack_pages;
2555         unsigned long i = 0;
2556         pgprot_t pgprot;
2557         void *addr;
2558
2559         /* A single page can always be kmapped */
2560         if (n_pages == 1 && type == I915_MAP_WB)
2561                 return kmap(sg_page(sgt->sgl));
2562
2563         if (n_pages > ARRAY_SIZE(stack_pages)) {
2564                 /* Too big for stack -- allocate temporary array instead */
2565                 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2566                 if (!pages)
2567                         return NULL;
2568         }
2569
2570         for_each_sgt_page(page, sgt_iter, sgt)
2571                 pages[i++] = page;
2572
2573         /* Check that we have the expected number of pages */
2574         GEM_BUG_ON(i != n_pages);
2575
2576         switch (type) {
2577         case I915_MAP_WB:
2578                 pgprot = PAGE_KERNEL;
2579                 break;
2580         case I915_MAP_WC:
2581                 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2582                 break;
2583         }
2584         addr = vmap(pages, n_pages, 0, pgprot);
2585
2586         if (pages != stack_pages)
2587                 drm_free_large(pages);
2588
2589         return addr;
2590 }
2591
2592 /* get, pin, and map the pages of the object into kernel space */
2593 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2594                               enum i915_map_type type)
2595 {
2596         enum i915_map_type has_type;
2597         bool pinned;
2598         void *ptr;
2599         int ret;
2600
2601         GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
2602
2603         ret = mutex_lock_interruptible(&obj->mm.lock);
2604         if (ret)
2605                 return ERR_PTR(ret);
2606
2607         pinned = true;
2608         if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
2609                 if (unlikely(!obj->mm.pages)) {
2610                         ret = ____i915_gem_object_get_pages(obj);
2611                         if (ret)
2612                                 goto err_unlock;
2613
2614                         smp_mb__before_atomic();
2615                 }
2616                 atomic_inc(&obj->mm.pages_pin_count);
2617                 pinned = false;
2618         }
2619         GEM_BUG_ON(!obj->mm.pages);
2620
2621         ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
2622         if (ptr && has_type != type) {
2623                 if (pinned) {
2624                         ret = -EBUSY;
2625                         goto err_unpin;
2626                 }
2627
2628                 if (is_vmalloc_addr(ptr))
2629                         vunmap(ptr);
2630                 else
2631                         kunmap(kmap_to_page(ptr));
2632
2633                 ptr = obj->mm.mapping = NULL;
2634         }
2635
2636         if (!ptr) {
2637                 ptr = i915_gem_object_map(obj, type);
2638                 if (!ptr) {
2639                         ret = -ENOMEM;
2640                         goto err_unpin;
2641                 }
2642
2643                 obj->mm.mapping = ptr_pack_bits(ptr, type);
2644         }
2645
2646 out_unlock:
2647         mutex_unlock(&obj->mm.lock);
2648         return ptr;
2649
2650 err_unpin:
2651         atomic_dec(&obj->mm.pages_pin_count);
2652 err_unlock:
2653         ptr = ERR_PTR(ret);
2654         goto out_unlock;
2655 }
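
/*
 * Minimal in-kernel sketch of the pin_map/unpin_map pairing implemented above
 * (illustrative only, excluded from the build): pin and map the whole object,
 * scribble over it, then drop the mapping reference. "example_clear_object" is
 * a hypothetical helper; real users keep the map pinned only as long as needed.
 */
#if 0	/* illustrative example, excluded from the kernel build */
static int example_clear_object(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, 0, obj->base.size);

	i915_gem_object_unpin_map(obj);
	return 0;
}
#endif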
2656
2657 static bool i915_context_is_banned(const struct i915_gem_context *ctx)
2658 {
2659         if (ctx->banned)
2660                 return true;
2661
2662         if (!ctx->bannable)
2663                 return false;
2664
2665         if (ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD) {
2666                 DRM_DEBUG("context hanging too often, banning!\n");
2667                 return true;
2668         }
2669
2670         return false;
2671 }
2672
2673 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
2674 {
2675         ctx->ban_score += CONTEXT_SCORE_GUILTY;
2676
2677         ctx->banned = i915_context_is_banned(ctx);
2678         ctx->guilty_count++;
2679
2680         DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
2681                          ctx->name, ctx->ban_score,
2682                          yesno(ctx->banned));
2683
2684         if (!ctx->banned || IS_ERR_OR_NULL(ctx->file_priv))
2685                 return;
2686
2687         ctx->file_priv->context_bans++;
2688         DRM_DEBUG_DRIVER("client %s has had %d contexts banned\n",
2689                          ctx->name, ctx->file_priv->context_bans);
2690 }
2691
2692 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
2693 {
2694         ctx->active_count++;
2695 }
2696
2697 struct drm_i915_gem_request *
2698 i915_gem_find_active_request(struct intel_engine_cs *engine)
2699 {
2700         struct drm_i915_gem_request *request;
2701
2702         /* We are called by the error capture and reset at a random
2703          * point in time. In particular, note that neither is crucially
2704          * ordered with an interrupt. After a hang, the GPU is dead and we
2705          * assume that no more writes can happen (we waited long enough for
2706          * all writes that were in transaction to be flushed) - adding an
2707          * extra delay for a recent interrupt is pointless. Hence, we do
2708          * not need an engine->irq_seqno_barrier() before the seqno reads.
2709          */
2710         list_for_each_entry(request, &engine->timeline->requests, link) {
2711                 if (__i915_gem_request_completed(request))
2712                         continue;
2713
2714                 return request;
2715         }
2716
2717         return NULL;
2718 }
2719
2720 static void reset_request(struct drm_i915_gem_request *request)
2721 {
2722         void *vaddr = request->ring->vaddr;
2723         u32 head;
2724
2725         /* As this request likely depends on state from the lost
2726          * context, clear out all the user operations leaving the
2727          * breadcrumb at the end (so we get the fence notifications).
2728          */
2729         head = request->head;
2730         if (request->postfix < head) {
2731                 memset(vaddr + head, 0, request->ring->size - head);
2732                 head = 0;
2733         }
2734         memset(vaddr + head, 0, request->postfix - head);
2735 }
2736
2737 static void i915_gem_reset_engine(struct intel_engine_cs *engine)
2738 {
2739         struct drm_i915_gem_request *request;
2740         struct i915_gem_context *incomplete_ctx;
2741         struct intel_timeline *timeline;
2742         bool ring_hung;
2743
2744         if (engine->irq_seqno_barrier)
2745                 engine->irq_seqno_barrier(engine);
2746
2747         request = i915_gem_find_active_request(engine);
2748         if (!request)
2749                 return;
2750
2751         ring_hung = engine->hangcheck.stalled;
2752         if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
2753                 DRM_DEBUG_DRIVER("%s pardoned, was guilty? %s\n",
2754                                  engine->name,
2755                                  yesno(ring_hung));
2756                 ring_hung = false;
2757         }
2758
2759         if (ring_hung)
2760                 i915_gem_context_mark_guilty(request->ctx);
2761         else
2762                 i915_gem_context_mark_innocent(request->ctx);
2763
2764         if (!ring_hung)
2765                 return;
2766
2767         DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
2768                          engine->name, request->global_seqno);
2769
2770         /* Setup the CS to resume from the breadcrumb of the hung request */
2771         engine->reset_hw(engine, request);
2772
2773         /* Users of the default context do not rely on logical state
2774          * preserved between batches. They have to emit full state on
2775          * every batch and so it is safe to execute queued requests following
2776          * the hang.
2777          *
2778          * Other contexts preserve state, now corrupt. We want to skip all
2779          * queued requests that reference the corrupt context.
2780          */
2781         incomplete_ctx = request->ctx;
2782         if (i915_gem_context_is_default(incomplete_ctx))
2783                 return;
2784
2785         list_for_each_entry_continue(request, &engine->timeline->requests, link)
2786                 if (request->ctx == incomplete_ctx)
2787                         reset_request(request);
2788
2789         timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
2790         list_for_each_entry(request, &timeline->requests, link)
2791                 reset_request(request);
2792 }
2793
2794 void i915_gem_reset(struct drm_i915_private *dev_priv)
2795 {
2796         struct intel_engine_cs *engine;
2797         enum intel_engine_id id;
2798
2799         lockdep_assert_held(&dev_priv->drm.struct_mutex);
2800
2801         i915_gem_retire_requests(dev_priv);
2802
2803         for_each_engine(engine, dev_priv, id)
2804                 i915_gem_reset_engine(engine);
2805
2806         i915_gem_restore_fences(dev_priv);
2807
2808         if (dev_priv->gt.awake) {
2809                 intel_sanitize_gt_powersave(dev_priv);
2810                 intel_enable_gt_powersave(dev_priv);
2811                 if (INTEL_GEN(dev_priv) >= 6)
2812                         gen6_rps_busy(dev_priv);
2813         }
2814 }
2815
2816 static void nop_submit_request(struct drm_i915_gem_request *request)
2817 {
2818         i915_gem_request_submit(request);
2819         intel_engine_init_global_seqno(request->engine, request->global_seqno);
2820 }
2821
2822 static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
2823 {
2824         /* We need to be sure that no thread is running the old callback as
2825          * we install the nop handler (otherwise we would submit a request
2826          * to hardware that will never complete). In order to prevent this
2827          * race, we wait until the machine is idle before making the swap
2828          * (using stop_machine()).
2829          */
2830         engine->submit_request = nop_submit_request;
2831
2832         /* Mark all pending requests as complete so that any concurrent
2833          * (lockless) lookup doesn't try and wait upon the request as we
2834          * reset it.
2835          */
2836         intel_engine_init_global_seqno(engine,
2837                                        intel_engine_last_submit(engine));
2838
2839         /*
2840          * Clear the execlists queue before freeing the requests, as those
2841          * are the ones that keep the context and ringbuffer backing objects
2842          * pinned in place.
2843          */
2844
2845         if (i915.enable_execlists) {
2846                 unsigned long flags;
2847
2848                 spin_lock_irqsave(&engine->timeline->lock, flags);
2849
2850                 i915_gem_request_put(engine->execlist_port[0].request);
2851                 i915_gem_request_put(engine->execlist_port[1].request);
2852                 memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
2853                 engine->execlist_queue = RB_ROOT;
2854                 engine->execlist_first = NULL;
2855
2856                 spin_unlock_irqrestore(&engine->timeline->lock, flags);
2857         }
2858 }
2859
2860 static int __i915_gem_set_wedged_BKL(void *data)
2861 {
2862         struct drm_i915_private *i915 = data;
2863         struct intel_engine_cs *engine;
2864         enum intel_engine_id id;
2865
2866         for_each_engine(engine, i915, id)
2867                 i915_gem_cleanup_engine(engine);
2868
2869         return 0;
2870 }
2871
2872 void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
2873 {
2874         lockdep_assert_held(&dev_priv->drm.struct_mutex);
2875         set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
2876
2877         stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
2878
2879         i915_gem_context_lost(dev_priv);
2880         i915_gem_retire_requests(dev_priv);
2881
2882         mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
2883 }
2884
2885 static void
2886 i915_gem_retire_work_handler(struct work_struct *work)
2887 {
2888         struct drm_i915_private *dev_priv =
2889                 container_of(work, typeof(*dev_priv), gt.retire_work.work);
2890         struct drm_device *dev = &dev_priv->drm;
2891
2892         /* Come back later if the device is busy... */
2893         if (mutex_trylock(&dev->struct_mutex)) {
2894                 i915_gem_retire_requests(dev_priv);
2895                 mutex_unlock(&dev->struct_mutex);
2896         }
2897
2898         /* Keep the retire handler running until we are finally idle.
2899          * We do not need to do this test under locking as in the worst-case
2900          * we queue the retire worker once too often.
2901          */
2902         if (READ_ONCE(dev_priv->gt.awake)) {
2903                 i915_queue_hangcheck(dev_priv);
2904                 queue_delayed_work(dev_priv->wq,
2905                                    &dev_priv->gt.retire_work,
2906                                    round_jiffies_up_relative(HZ));
2907         }
2908 }
2909
2910 static void
2911 i915_gem_idle_work_handler(struct work_struct *work)
2912 {
2913         struct drm_i915_private *dev_priv =
2914                 container_of(work, typeof(*dev_priv), gt.idle_work.work);
2915         struct drm_device *dev = &dev_priv->drm;
2916         struct intel_engine_cs *engine;
2917         enum intel_engine_id id;
2918         bool rearm_hangcheck;
2919
2920         if (!READ_ONCE(dev_priv->gt.awake))
2921                 return;
2922
2923         /*
2924          * Wait for last execlists context complete, but bail out in case a
2925          * new request is submitted.
2926          */
2927         wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
2928                  intel_execlists_idle(dev_priv), 10);
2929
2930         if (READ_ONCE(dev_priv->gt.active_requests))
2931                 return;
2932
2933         rearm_hangcheck =
2934                 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
2935
2936         if (!mutex_trylock(&dev->struct_mutex)) {
2937                 /* Currently busy, come back later */
2938                 mod_delayed_work(dev_priv->wq,
2939                                  &dev_priv->gt.idle_work,
2940                                  msecs_to_jiffies(50));
2941                 goto out_rearm;
2942         }
2943
2944         /*
2945          * New request retired after this work handler started, extend active
2946          * period until next instance of the work.
2947          */
2948         if (work_pending(work))
2949                 goto out_unlock;
2950
2951         if (dev_priv->gt.active_requests)
2952                 goto out_unlock;
2953
2954         if (wait_for(intel_execlists_idle(dev_priv), 10))
2955                 DRM_ERROR("Timeout waiting for engines to idle\n");
2956
2957         for_each_engine(engine, dev_priv, id)
2958                 i915_gem_batch_pool_fini(&engine->batch_pool);
2959
2960         GEM_BUG_ON(!dev_priv->gt.awake);
2961         dev_priv->gt.awake = false;
2962         rearm_hangcheck = false;
2963
2964         if (INTEL_GEN(dev_priv) >= 6)
2965                 gen6_rps_idle(dev_priv);
2966         intel_runtime_pm_put(dev_priv);
2967 out_unlock:
2968         mutex_unlock(&dev->struct_mutex);
2969
2970 out_rearm:
2971         if (rearm_hangcheck) {
2972                 GEM_BUG_ON(!dev_priv->gt.awake);
2973                 i915_queue_hangcheck(dev_priv);
2974         }
2975 }
2976
2977 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
2978 {
2979         struct drm_i915_gem_object *obj = to_intel_bo(gem);
2980         struct drm_i915_file_private *fpriv = file->driver_priv;
2981         struct i915_vma *vma, *vn;
2982
2983         mutex_lock(&obj->base.dev->struct_mutex);
2984         list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
2985                 if (vma->vm->file == fpriv)
2986                         i915_vma_close(vma);
2987
2988         if (i915_gem_object_is_active(obj) &&
2989             !i915_gem_object_has_active_reference(obj)) {
2990                 i915_gem_object_set_active_reference(obj);
2991                 i915_gem_object_get(obj);
2992         }
2993         mutex_unlock(&obj->base.dev->struct_mutex);
2994 }
2995
2996 static unsigned long to_wait_timeout(s64 timeout_ns)
2997 {
2998         if (timeout_ns < 0)
2999                 return MAX_SCHEDULE_TIMEOUT;
3000
3001         if (timeout_ns == 0)
3002                 return 0;
3003
3004         return nsecs_to_jiffies_timeout(timeout_ns);
3005 }
3006
3007 /**
3008  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
3009  * @dev: drm device pointer
3010  * @data: ioctl data blob
3011  * @file: drm file pointer
3012  *
3013  * Returns 0 if successful, else an error is returned with the remaining time in
3014  * the timeout parameter.
3015  *  -ETIME: object is still busy after timeout
3016  *  -ERESTARTSYS: signal interrupted the wait
3017  *  -ENOENT: object doesn't exist
3018  * Also possible, but rare:
3019  *  -EAGAIN: GPU wedged
3020  *  -ENOMEM: out of memory
3021  *  -ENODEV: Internal IRQ fail
3022  *  -E?: The add request failed
3023  *
3024  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3025  * non-zero timeout parameter the wait ioctl will wait for the given number of
3026  * nanoseconds on an object becoming unbusy. Since the wait itself does so
3027  * without holding struct_mutex the object may become re-busied before this
3028  * function completes. A similar but shorter race condition exists in the busy
3029  * ioctl.
3030  */
3031 int
3032 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3033 {
3034         struct drm_i915_gem_wait *args = data;
3035         struct drm_i915_gem_object *obj;
3036         ktime_t start;
3037         long ret;
3038
3039         if (args->flags != 0)
3040                 return -EINVAL;
3041
3042         obj = i915_gem_object_lookup(file, args->bo_handle);
3043         if (!obj)
3044                 return -ENOENT;
3045
3046         start = ktime_get();
3047
3048         ret = i915_gem_object_wait(obj,
3049                                    I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
3050                                    to_wait_timeout(args->timeout_ns),
3051                                    to_rps_client(file));
3052
3053         if (args->timeout_ns > 0) {
3054                 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
3055                 if (args->timeout_ns < 0)
3056                         args->timeout_ns = 0;
3057         }
3058
3059         i915_gem_object_put(obj);
3060         return ret;
3061 }
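/*
 * Illustrative userspace sketch (not part of this file): waiting on a GEM
 * handle through DRM_IOCTL_I915_GEM_WAIT as documented above. The open DRM
 * fd and the buffer handle are assumed to exist already.
 *
 *	#include <errno.h>
 *	#include <stdint.h>
 *	#include <xf86drm.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int wait_bo(int fd, uint32_t handle, int64_t timeout_ns)
 *	{
 *		struct drm_i915_gem_wait wait = {
 *			.bo_handle = handle,
 *			.timeout_ns = timeout_ns,	// < 0 waits indefinitely
 *		};
 *
 *		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
 *			return -errno;		// -ETIME: still busy after timeout
 *
 *		return 0;	// idle; wait.timeout_ns holds the unused budget
 *	}
 */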
3062
3063 static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
3064 {
3065         int ret, i;
3066
3067         for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
3068                 ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
3069                 if (ret)
3070                         return ret;
3071         }
3072
3073         return 0;
3074 }
3075
3076 int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
3077 {
3078         int ret;
3079
3080         if (flags & I915_WAIT_LOCKED) {
3081                 struct i915_gem_timeline *tl;
3082
3083                 lockdep_assert_held(&i915->drm.struct_mutex);
3084
3085                 list_for_each_entry(tl, &i915->gt.timelines, link) {
3086                         ret = wait_for_timeline(tl, flags);
3087                         if (ret)
3088                                 return ret;
3089                 }
3090         } else {
3091                 ret = wait_for_timeline(&i915->gt.global_timeline, flags);
3092                 if (ret)
3093                         return ret;
3094         }
3095
3096         return 0;
3097 }
3098
3099 void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3100                              bool force)
3101 {
3102         /* If we don't have a page list set up, then we're not pinned
3103          * to GPU, and we can ignore the cache flush because it'll happen
3104          * again at bind time.
3105          */
3106         if (!obj->mm.pages)
3107                 return;
3108
3109         /*
3110          * Stolen memory is always coherent with the GPU as it is explicitly
3111          * marked as wc by the system, or the system is cache-coherent.
3112          */
3113         if (obj->stolen || obj->phys_handle)
3114                 return;
3115
3116         /* If the GPU is snooping the contents of the CPU cache,
3117          * we do not need to manually clear the CPU cache lines.  However,
3118          * the caches are only snooped when the render cache is
3119          * flushed/invalidated.  As we always have to emit invalidations
3120          * and flushes when moving into and out of the RENDER domain, correct
3121          * snooping behaviour occurs naturally as the result of our domain
3122          * tracking.
3123          */
3124         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3125                 obj->cache_dirty = true;
3126                 return;
3127         }
3128
3129         trace_i915_gem_object_clflush(obj);
3130         drm_clflush_sg(obj->mm.pages);
3131         obj->cache_dirty = false;
3132 }
3133
3134 /** Flushes the GTT write domain for the object if it's dirty. */
3135 static void
3136 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3137 {
3138         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3139
3140         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3141                 return;
3142
3143         /* No actual flushing is required for the GTT write domain.  Writes
3144          * to it "immediately" go to main memory as far as we know, so there's
3145          * no chipset flush.  It also doesn't land in render cache.
3146          *
3147          * However, we do have to enforce the order so that all writes through
3148          * the GTT land before any writes to the device, such as updates to
3149          * the GATT itself.
3150          *
3151          * We also have to wait a bit for the writes to land from the GTT.
3152          * An uncached read (i.e. mmio) seems to be ideal for the round-trip
3153          * timing. This issue has only been observed when switching quickly
3154          * between GTT writes and CPU reads from inside the kernel on recent hw,
3155          * and it appears to only affect discrete GTT blocks (i.e. on LLC
3156          * system agents we cannot reproduce this behaviour).
3157          */
3158         wmb();
3159         if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
3160                 POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
3161
3162         intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
3163
3164         obj->base.write_domain = 0;
3165         trace_i915_gem_object_change_domain(obj,
3166                                             obj->base.read_domains,
3167                                             I915_GEM_DOMAIN_GTT);
3168 }
3169
3170 /** Flushes the CPU write domain for the object if it's dirty. */
3171 static void
3172 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3173 {
3174         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3175                 return;
3176
3177         i915_gem_clflush_object(obj, obj->pin_display);
3178         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3179
3180         obj->base.write_domain = 0;
3181         trace_i915_gem_object_change_domain(obj,
3182                                             obj->base.read_domains,
3183                                             I915_GEM_DOMAIN_CPU);
3184 }
3185
3186 /**
3187  * Moves a single object to the GTT read, and possibly write domain.
3188  * @obj: object to act on
3189  * @write: ask for write access or read only
3190  *
3191  * This function returns when the move is complete, including waiting on
3192  * flushes to occur.
3193  */
3194 int
3195 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3196 {
3197         uint32_t old_write_domain, old_read_domains;
3198         int ret;
3199
3200         lockdep_assert_held(&obj->base.dev->struct_mutex);
3201
3202         ret = i915_gem_object_wait(obj,
3203                                    I915_WAIT_INTERRUPTIBLE |
3204                                    I915_WAIT_LOCKED |
3205                                    (write ? I915_WAIT_ALL : 0),
3206                                    MAX_SCHEDULE_TIMEOUT,
3207                                    NULL);
3208         if (ret)
3209                 return ret;
3210
3211         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3212                 return 0;
3213
3214         /* Flush and acquire obj->pages so that we are coherent through
3215          * direct access in memory with previous cached writes through
3216          * shmemfs and that our cache domain tracking remains valid.
3217          * For example, if the obj->filp was moved to swap without us
3218          * being notified and releasing the pages, we would mistakenly
3219          * continue to assume that the obj remained out of the CPU cached
3220          * domain.
3221          */
3222         ret = i915_gem_object_pin_pages(obj);
3223         if (ret)
3224                 return ret;
3225
3226         i915_gem_object_flush_cpu_write_domain(obj);
3227
3228         /* Serialise direct access to this object with the barriers for
3229          * coherent writes from the GPU, by effectively invalidating the
3230          * GTT domain upon first access.
3231          */
3232         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3233                 mb();
3234
3235         old_write_domain = obj->base.write_domain;
3236         old_read_domains = obj->base.read_domains;
3237
3238         /* It should now be out of any other write domains, and we can update
3239          * the domain values for our changes.
3240          */
3241         GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3242         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3243         if (write) {
3244                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3245                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3246                 obj->mm.dirty = true;
3247         }
3248
3249         trace_i915_gem_object_change_domain(obj,
3250                                             old_read_domains,
3251                                             old_write_domain);
3252
3253         i915_gem_object_unpin_pages(obj);
3254         return 0;
3255 }
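/*
 * Illustrative userspace sketch (not part of this file): the GTT move above
 * is typically reached via DRM_IOCTL_I915_GEM_SET_DOMAIN. The fd and handle
 * are assumed to exist; a write_domain of 0 requests read-only access.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_GTT,
 *		.write_domain = I915_GEM_DOMAIN_GTT,
 *	};
 *	int err = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) ? -errno : 0;
 */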
3256
3257 /**
3258  * Changes the cache-level of an object across all VMA.
3259  * @obj: object to act on
3260  * @cache_level: new cache level to set for the object
3261  *
3262  * After this function returns, the object will be in the new cache-level
3263  * across all GTT and the contents of the backing storage will be coherent,
3264  * with respect to the new cache-level. In order to keep the backing storage
3265  * coherent for all users, we only allow a single cache level to be set
3266  * globally on the object and prevent it from being changed whilst the
3267  * hardware is reading from the object. That is if the object is currently
3268  * on the scanout it will be set to uncached (or equivalent display
3269  * cache coherency) and all non-MOCS GPU access will also be uncached so
3270  * that all direct access to the scanout remains coherent.
3271  */
3272 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3273                                     enum i915_cache_level cache_level)
3274 {
3275         struct i915_vma *vma;
3276         int ret;
3277
3278         lockdep_assert_held(&obj->base.dev->struct_mutex);
3279
3280         if (obj->cache_level == cache_level)
3281                 return 0;
3282
3283         /* Inspect the list of currently bound VMA and unbind any that would
3284          * be invalid given the new cache-level. This is principally to
3285          * catch the issue of the CS prefetch crossing page boundaries and
3286          * reading an invalid PTE on older architectures.
3287          */
3288 restart:
3289         list_for_each_entry(vma, &obj->vma_list, obj_link) {
3290                 if (!drm_mm_node_allocated(&vma->node))
3291                         continue;
3292
3293                 if (i915_vma_is_pinned(vma)) {
3294                         DRM_DEBUG("can not change the cache level of pinned objects\n");
3295                         return -EBUSY;
3296                 }
3297
3298                 if (i915_gem_valid_gtt_space(vma, cache_level))
3299                         continue;
3300
3301                 ret = i915_vma_unbind(vma);
3302                 if (ret)
3303                         return ret;
3304
3305                 /* As unbinding may affect other elements in the
3306                  * obj->vma_list (due to side-effects from retiring
3307                  * an active vma), play safe and restart the iterator.
3308                  */
3309                 goto restart;
3310         }
3311
3312         /* We can reuse the existing drm_mm nodes but need to change the
3313          * cache-level on the PTE. We could simply unbind them all and
3314          * rebind with the correct cache-level on next use. However since
3315          * we already have a valid slot, dma mapping, pages etc, we may as well
3316          * rewrite the PTE in the belief that doing so tramples upon less
3317          * state and so involves less work.
3318          */
3319         if (obj->bind_count) {
3320                 /* Before we change the PTE, the GPU must not be accessing it.
3321                  * If we wait upon the object, we know that all the bound
3322                  * VMA are no longer active.
3323                  */
3324                 ret = i915_gem_object_wait(obj,
3325                                            I915_WAIT_INTERRUPTIBLE |
3326                                            I915_WAIT_LOCKED |
3327                                            I915_WAIT_ALL,
3328                                            MAX_SCHEDULE_TIMEOUT,
3329                                            NULL);
3330                 if (ret)
3331                         return ret;
3332
3333                 if (!HAS_LLC(to_i915(obj->base.dev)) &&
3334                     cache_level != I915_CACHE_NONE) {
3335                         /* Access to snoopable pages through the GTT is
3336                          * incoherent and on some machines causes a hard
3337                          * lockup. Relinquish the CPU mmapping to force
3338                          * userspace to refault in the pages and we can
3339                          * then double check if the GTT mapping is still
3340                          * valid for that pointer access.
3341                          */
3342                         i915_gem_release_mmap(obj);
3343
3344                         /* As we no longer need a fence for GTT access,
3345                          * we can relinquish it now (and so prevent having
3346                          * to steal a fence from someone else on the next
3347                          * fence request). Note GPU activity would have
3348                          * dropped the fence as all snoopable access is
3349                          * supposed to be linear.
3350                          */
3351                         list_for_each_entry(vma, &obj->vma_list, obj_link) {
3352                                 ret = i915_vma_put_fence(vma);
3353                                 if (ret)
3354                                         return ret;
3355                         }
3356                 } else {
3357                         /* We either have incoherent backing store and
3358                          * so no GTT access or the architecture is fully
3359                          * coherent. In such cases, existing GTT mmaps
3360                          * ignore the cache bit in the PTE and we can
3361                          * rewrite it without confusing the GPU or having
3362                          * to force userspace to fault back in its mmaps.
3363                          */
3364                 }
3365
3366                 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3367                         if (!drm_mm_node_allocated(&vma->node))
3368                                 continue;
3369
3370                         ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3371                         if (ret)
3372                                 return ret;
3373                 }
3374         }
3375
3376         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
3377             cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3378                 obj->cache_dirty = true;
3379
3380         list_for_each_entry(vma, &obj->vma_list, obj_link)
3381                 vma->node.color = cache_level;
3382         obj->cache_level = cache_level;
3383
3384         return 0;
3385 }
3386
3387 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3388                                struct drm_file *file)
3389 {
3390         struct drm_i915_gem_caching *args = data;
3391         struct drm_i915_gem_object *obj;
3392         int err = 0;
3393
3394         rcu_read_lock();
3395         obj = i915_gem_object_lookup_rcu(file, args->handle);
3396         if (!obj) {
3397                 err = -ENOENT;
3398                 goto out;
3399         }
3400
3401         switch (obj->cache_level) {
3402         case I915_CACHE_LLC:
3403         case I915_CACHE_L3_LLC:
3404                 args->caching = I915_CACHING_CACHED;
3405                 break;
3406
3407         case I915_CACHE_WT:
3408                 args->caching = I915_CACHING_DISPLAY;
3409                 break;
3410
3411         default:
3412                 args->caching = I915_CACHING_NONE;
3413                 break;
3414         }
3415 out:
3416         rcu_read_unlock();
3417         return err;
3418 }
3419
3420 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3421                                struct drm_file *file)
3422 {
3423         struct drm_i915_private *i915 = to_i915(dev);
3424         struct drm_i915_gem_caching *args = data;
3425         struct drm_i915_gem_object *obj;
3426         enum i915_cache_level level;
3427         int ret;
3428
3429         switch (args->caching) {
3430         case I915_CACHING_NONE:
3431                 level = I915_CACHE_NONE;
3432                 break;
3433         case I915_CACHING_CACHED:
3434                 /*
3435                  * Due to a HW issue on BXT A stepping, GPU stores via a
3436                  * snooped mapping may leave stale data in a corresponding CPU
3437                  * cacheline, whereas normally such cachelines would get
3438                  * invalidated.
3439                  */
3440                 if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
3441                         return -ENODEV;
3442
3443                 level = I915_CACHE_LLC;
3444                 break;
3445         case I915_CACHING_DISPLAY:
3446                 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
3447                 break;
3448         default:
3449                 return -EINVAL;
3450         }
3451
3452         ret = i915_mutex_lock_interruptible(dev);
3453         if (ret)
3454                 return ret;
3455
3456         obj = i915_gem_object_lookup(file, args->handle);
3457         if (!obj) {
3458                 ret = -ENOENT;
3459                 goto unlock;
3460         }
3461
3462         ret = i915_gem_object_set_cache_level(obj, level);
3463         i915_gem_object_put(obj);
3464 unlock:
3465         mutex_unlock(&dev->struct_mutex);
3466         return ret;
3467 }
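/*
 * Illustrative userspace sketch (not part of this file): picking a caching
 * mode with DRM_IOCTL_I915_GEM_SET_CACHING. Per the checks above, asking for
 * I915_CACHING_CACHED on a part without LLC or snooping fails with -ENODEV.
 * The fd and handle are assumed to exist.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,	// or I915_CACHING_NONE / _DISPLAY
 *	};
 *	int err = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) ? -errno : 0;
 */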
3468
3469 /*
3470  * Prepare buffer for display plane (scanout, cursors, etc).
3471  * Can be called from an uninterruptible phase (modesetting) and allows
3472  * any flushes to be pipelined (for pageflips).
3473  */
3474 struct i915_vma *
3475 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3476                                      u32 alignment,
3477                                      const struct i915_ggtt_view *view)
3478 {
3479         struct i915_vma *vma;
3480         u32 old_read_domains, old_write_domain;
3481         int ret;
3482
3483         lockdep_assert_held(&obj->base.dev->struct_mutex);
3484
3485         /* Mark the pin_display early so that we account for the
3486          * display coherency whilst setting up the cache domains.
3487          */
3488         obj->pin_display++;
3489
3490         /* The display engine is not coherent with the LLC cache on gen6.  As
3491          * a result, we make sure that the pinning that is about to occur is
3492          * done with uncached PTEs. This is lowest common denominator for all
3493          * chipsets.
3494          *
3495          * However for gen6+, we could do better by using the GFDT bit instead
3496          * of uncaching, which would allow us to flush all the LLC-cached data
3497          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3498          */
3499         ret = i915_gem_object_set_cache_level(obj,
3500                                               HAS_WT(to_i915(obj->base.dev)) ?
3501                                               I915_CACHE_WT : I915_CACHE_NONE);
3502         if (ret) {
3503                 vma = ERR_PTR(ret);
3504                 goto err_unpin_display;
3505         }
3506
3507         /* As the user may map the buffer once pinned in the display plane
3508          * (e.g. libkms for the bootup splash), we have to ensure that we
3509          * always use map_and_fenceable for all scanout buffers. However,
3510          * it may simply be too big to fit into mappable, in which case
3511          * put it anyway and hope that userspace can cope (but always first
3512          * try to preserve the existing ABI).
3513          */
3514         vma = ERR_PTR(-ENOSPC);
3515         if (view->type == I915_GGTT_VIEW_NORMAL)
3516                 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3517                                                PIN_MAPPABLE | PIN_NONBLOCK);
3518         if (IS_ERR(vma)) {
3519                 struct drm_i915_private *i915 = to_i915(obj->base.dev);
3520                 unsigned int flags;
3521
3522                 /* Valleyview is definitely limited to scanning out the first
3523                  * 512MiB. Lets presume this behaviour was inherited from the
3524                  * g4x display engine and that all earlier gen are similarly
3525                  * limited. Testing suggests that it is a little more
3526                  * complicated than this. For example, Cherryview appears quite
3527                  * happy to scanout from anywhere within its global aperture.
3528                  */
3529                 flags = 0;
3530                 if (HAS_GMCH_DISPLAY(i915))
3531                         flags = PIN_MAPPABLE;
3532                 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
3533         }
3534         if (IS_ERR(vma))
3535                 goto err_unpin_display;
3536
3537         vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3538
3539         /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
3540         if (obj->cache_dirty) {
3541                 i915_gem_clflush_object(obj, true);
3542                 intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
3543         }
3544
3545         old_write_domain = obj->base.write_domain;
3546         old_read_domains = obj->base.read_domains;
3547
3548         /* It should now be out of any other write domains, and we can update
3549          * the domain values for our changes.
3550          */
3551         obj->base.write_domain = 0;
3552         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3553
3554         trace_i915_gem_object_change_domain(obj,
3555                                             old_read_domains,
3556                                             old_write_domain);
3557
3558         return vma;
3559
3560 err_unpin_display:
3561         obj->pin_display--;
3562         return vma;
3563 }
3564
3565 void
3566 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
3567 {
3568         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
3569
3570         if (WARN_ON(vma->obj->pin_display == 0))
3571                 return;
3572
3573         if (--vma->obj->pin_display == 0)
3574                 vma->display_alignment = 0;
3575
3576         /* Bump the LRU to try and avoid premature eviction whilst flipping  */
3577         if (!i915_vma_is_active(vma))
3578                 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3579
3580         i915_vma_unpin(vma);
3581 }
3582
3583 /**
3584  * Moves a single object to the CPU read, and possibly write domain.
3585  * @obj: object to act on
3586  * @write: requesting write or read-only access
3587  *
3588  * This function returns when the move is complete, including waiting on
3589  * flushes to occur.
3590  */
3591 int
3592 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3593 {
3594         uint32_t old_write_domain, old_read_domains;
3595         int ret;
3596
3597         lockdep_assert_held(&obj->base.dev->struct_mutex);
3598
3599         ret = i915_gem_object_wait(obj,
3600                                    I915_WAIT_INTERRUPTIBLE |
3601                                    I915_WAIT_LOCKED |
3602                                    (write ? I915_WAIT_ALL : 0),
3603                                    MAX_SCHEDULE_TIMEOUT,
3604                                    NULL);
3605         if (ret)
3606                 return ret;
3607
3608         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3609                 return 0;
3610
3611         i915_gem_object_flush_gtt_write_domain(obj);
3612
3613         old_write_domain = obj->base.write_domain;
3614         old_read_domains = obj->base.read_domains;
3615
3616         /* Flush the CPU cache if it's still invalid. */
3617         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3618                 i915_gem_clflush_object(obj, false);
3619
3620                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3621         }
3622
3623         /* It should now be out of any other write domains, and we can update
3624          * the domain values for our changes.
3625          */
3626         GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3627
3628         /* If we're writing through the CPU, then the GPU read domains will
3629          * need to be invalidated at next use.
3630          */
3631         if (write) {
3632                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3633                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3634         }
3635
3636         trace_i915_gem_object_change_domain(obj,
3637                                             old_read_domains,
3638                                             old_write_domain);
3639
3640         return 0;
3641 }
3642
3643 /* Throttle our rendering by waiting until the ring has completed our requests
3644  * emitted over 20 msec ago.
3645  *
3646  * Note that if we were to use the current jiffies each time around the loop,
3647  * we wouldn't escape the function with any frames outstanding if the time to
3648  * render a frame was over 20ms.
3649  *
3650  * This should get us reasonable parallelism between CPU and GPU but also
3651  * relatively low latency when blocking on a particular request to finish.
3652  */
3653 static int
3654 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3655 {
3656         struct drm_i915_private *dev_priv = to_i915(dev);
3657         struct drm_i915_file_private *file_priv = file->driver_priv;
3658         unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
3659         struct drm_i915_gem_request *request, *target = NULL;
3660         long ret;
3661
3662         /* ABI: return -EIO if already wedged */
3663         if (i915_terminally_wedged(&dev_priv->gpu_error))
3664                 return -EIO;
3665
3666         spin_lock(&file_priv->mm.lock);
3667         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3668                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3669                         break;
3670
3671                 /*
3672                  * Note that the request might not have been submitted yet,
3673                  * in which case emitted_jiffies will be zero.
3674                  */
3675                 if (!request->emitted_jiffies)
3676                         continue;
3677
3678                 target = request;
3679         }
3680         if (target)
3681                 i915_gem_request_get(target);
3682         spin_unlock(&file_priv->mm.lock);
3683
3684         if (target == NULL)
3685                 return 0;
3686
3687         ret = i915_wait_request(target,
3688                                 I915_WAIT_INTERRUPTIBLE,
3689                                 MAX_SCHEDULE_TIMEOUT);
3690         i915_gem_request_put(target);
3691
3692         return ret < 0 ? ret : 0;
3693 }
3694
3695 struct i915_vma *
3696 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3697                          const struct i915_ggtt_view *view,
3698                          u64 size,
3699                          u64 alignment,
3700                          u64 flags)
3701 {
3702         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3703         struct i915_address_space *vm = &dev_priv->ggtt.base;
3704         struct i915_vma *vma;
3705         int ret;
3706
3707         lockdep_assert_held(&obj->base.dev->struct_mutex);
3708
3709         vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
3710         if (IS_ERR(vma))
3711                 return vma;
3712
3713         if (i915_vma_misplaced(vma, size, alignment, flags)) {
3714                 if (flags & PIN_NONBLOCK &&
3715                     (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
3716                         return ERR_PTR(-ENOSPC);
3717
3718                 if (flags & PIN_MAPPABLE) {
3719                         u32 fence_size;
3720
3721                         fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
3722                                                             i915_gem_object_get_tiling(obj));
3723                         /* If the required space is larger than the available
3724                          * aperture, we will not be able to find a slot for the
3725                          * object and unbinding the object now will be in
3726                          * vain. Worse, doing so may cause us to ping-pong
3727                          * the object in and out of the Global GTT and
3728                          * waste a lot of cycles under the mutex.
3729                          */
3730                         if (fence_size > dev_priv->ggtt.mappable_end)
3731                                 return ERR_PTR(-E2BIG);
3732
3733                         /* If NONBLOCK is set the caller is optimistically
3734                          * trying to cache the full object within the mappable
3735                          * aperture, and *must* have a fallback in place for
3736                          * situations where we cannot bind the object. We
3737                          * can be a little more lax here and use the fallback
3738                          * more often to avoid costly migrations of ourselves
3739                          * and other objects within the aperture.
3740                          *
3741                          * Half-the-aperture is used as a simple heuristic.
3742                          * More interesting would be to search for a free
3743                          * block prior to making the commitment to unbind.
3744                          * That caters for the self-harm case, and with a
3745                          * little more heuristics (e.g. NOFAULT, NOEVICT)
3746                          * we could try to minimise harm to others.
3747                          */
3748                         if (flags & PIN_NONBLOCK &&
3749                             fence_size > dev_priv->ggtt.mappable_end / 2)
3750                                 return ERR_PTR(-ENOSPC);
3751                 }
3752
3753                 WARN(i915_vma_is_pinned(vma),
3754                      "bo is already pinned in ggtt with incorrect alignment:"
3755                      " offset=%08x, req.alignment=%llx,"
3756                      " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
3757                      i915_ggtt_offset(vma), alignment,
3758                      !!(flags & PIN_MAPPABLE),
3759                      i915_vma_is_map_and_fenceable(vma));
3760                 ret = i915_vma_unbind(vma);
3761                 if (ret)
3762                         return ERR_PTR(ret);
3763         }
3764
3765         ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
3766         if (ret)
3767                 return ERR_PTR(ret);
3768
3769         return vma;
3770 }
3771
3772 static __always_inline unsigned int __busy_read_flag(unsigned int id)
3773 {
3774         /* Note that we could alias engines in the execbuf API, but
3775          * that would be very unwise as it prevents userspace from exercising
3776          * fine control over engine selection. Ahem.
3777          *
3778          * This should be something like EXEC_MAX_ENGINE instead of
3779          * I915_NUM_ENGINES.
3780          */
3781         BUILD_BUG_ON(I915_NUM_ENGINES > 16);
3782         return 0x10000 << id;
3783 }
3784
3785 static __always_inline unsigned int __busy_write_id(unsigned int id)
3786 {
3787         /* The uABI guarantees an active writer is also amongst the read
3788          * engines. This would be true if we accessed the activity tracking
3789          * under the lock, but as we perform the lookup of the object and
3790          * its activity locklessly we can not guarantee that the last_write
3791          * being active implies that we have set the same engine flag from
3792          * last_read - hence we always set both read and write busy for
3793          * last_write.
3794          */
3795         return id | __busy_read_flag(id);
3796 }
3797
3798 static __always_inline unsigned int
3799 __busy_set_if_active(const struct dma_fence *fence,
3800                      unsigned int (*flag)(unsigned int id))
3801 {
3802         struct drm_i915_gem_request *rq;
3803
3804         /* We have to check the current hw status of the fence as the uABI
3805          * guarantees forward progress. We could rely on the idle worker
3806          * to eventually flush us, but to minimise latency just ask the
3807          * hardware.
3808          *
3809          * Note we only report on the status of native fences.
3810          */
3811         if (!dma_fence_is_i915(fence))
3812                 return 0;
3813
3814         /* opencode to_request() in order to avoid const warnings */
3815         rq = container_of(fence, struct drm_i915_gem_request, fence);
3816         if (i915_gem_request_completed(rq))
3817                 return 0;
3818
3819         return flag(rq->engine->exec_id);
3820 }
3821
3822 static __always_inline unsigned int
3823 busy_check_reader(const struct dma_fence *fence)
3824 {
3825         return __busy_set_if_active(fence, __busy_read_flag);
3826 }
3827
3828 static __always_inline unsigned int
3829 busy_check_writer(const struct dma_fence *fence)
3830 {
3831         if (!fence)
3832                 return 0;
3833
3834         return __busy_set_if_active(fence, __busy_write_id);
3835 }
3836
3837 int
3838 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3839                     struct drm_file *file)
3840 {
3841         struct drm_i915_gem_busy *args = data;
3842         struct drm_i915_gem_object *obj;
3843         struct reservation_object_list *list;
3844         unsigned int seq;
3845         int err;
3846
3847         err = -ENOENT;
3848         rcu_read_lock();
3849         obj = i915_gem_object_lookup_rcu(file, args->handle);
3850         if (!obj)
3851                 goto out;
3852
3853         /* A discrepancy here is that we do not report the status of
3854          * non-i915 fences, i.e. even though we may report the object as idle,
3855          * a call to set-domain may still stall waiting for foreign rendering.
3856          * This also means that wait-ioctl may report an object as busy,
3857          * where busy-ioctl considers it idle.
3858          *
3859          * We trade the ability to warn of foreign fences to report on which
3860          * i915 engines are active for the object.
3861          *
3862          * Alternatively, we can trade that extra information on read/write
3863          * activity with
3864          *      args->busy =
3865          *              !reservation_object_test_signaled_rcu(obj->resv, true);
3866          * to report the overall busyness. This is what the wait-ioctl does.
3867          *
3868          */
3869 retry:
3870         seq = raw_read_seqcount(&obj->resv->seq);
3871
3872         /* Translate the exclusive fence to the READ *and* WRITE engine */
3873         args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
3874
3875         /* Translate shared fences to READ set of engines */
3876         list = rcu_dereference(obj->resv->fence);
3877         if (list) {
3878                 unsigned int shared_count = list->shared_count, i;
3879
3880                 for (i = 0; i < shared_count; ++i) {
3881                         struct dma_fence *fence =
3882                                 rcu_dereference(list->shared[i]);
3883
3884                         args->busy |= busy_check_reader(fence);
3885                 }
3886         }
3887
3888         if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
3889                 goto retry;
3890
3891         err = 0;
3892 out:
3893         rcu_read_unlock();
3894         return err;
3895 }
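/*
 * Illustrative userspace sketch (not part of this file): decoding the value
 * assembled above. The low 16 bits carry the exec_id of the last writer
 * (0 if there is none), and each reading engine sets 0x10000 << exec_id in
 * the upper bits. The fd and handle are assumed to exist.
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0) {
 *		bool idle = busy.busy == 0;
 *		uint16_t writer = busy.busy & 0xffff;	// exec_id of the writer
 *		uint16_t readers = busy.busy >> 16;	// per-engine read flags
 *	}
 */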
3896
3897 int
3898 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3899                         struct drm_file *file_priv)
3900 {
3901         return i915_gem_ring_throttle(dev, file_priv);
3902 }
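/*
 * Illustrative userspace sketch (not part of this file): the throttle ioctl
 * takes no argument; callers invoke it to block until their requests emitted
 * more than ~20ms ago have completed. The fd is assumed to exist.
 *
 *	int err = drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL) ? -errno : 0;
 */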
3903
3904 int
3905 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3906                        struct drm_file *file_priv)
3907 {
3908         struct drm_i915_private *dev_priv = to_i915(dev);
3909         struct drm_i915_gem_madvise *args = data;
3910         struct drm_i915_gem_object *obj;
3911         int err;
3912
3913         switch (args->madv) {
3914         case I915_MADV_DONTNEED:
3915         case I915_MADV_WILLNEED:
3916                 break;
3917         default:
3918                 return -EINVAL;
3919         }
3920
3921         obj = i915_gem_object_lookup(file_priv, args->handle);
3922         if (!obj)
3923                 return -ENOENT;
3924
3925         err = mutex_lock_interruptible(&obj->mm.lock);
3926         if (err)
3927                 goto out;
3928
3929         if (obj->mm.pages &&
3930             i915_gem_object_is_tiled(obj) &&
3931             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
3932                 if (obj->mm.madv == I915_MADV_WILLNEED) {
3933                         GEM_BUG_ON(!obj->mm.quirked);
3934                         __i915_gem_object_unpin_pages(obj);
3935                         obj->mm.quirked = false;
3936                 }
3937                 if (args->madv == I915_MADV_WILLNEED) {
3938                         GEM_BUG_ON(obj->mm.quirked);
3939                         __i915_gem_object_pin_pages(obj);
3940                         obj->mm.quirked = true;
3941                 }
3942         }
3943
3944         if (obj->mm.madv != __I915_MADV_PURGED)
3945                 obj->mm.madv = args->madv;
3946
3947         /* if the object is no longer attached, discard its backing storage */
3948         if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
3949                 i915_gem_object_truncate(obj);
3950
3951         args->retained = obj->mm.madv != __I915_MADV_PURGED;
3952         mutex_unlock(&obj->mm.lock);
3953
3954 out:
3955         i915_gem_object_put(obj);
3956         return err;
3957 }
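/*
 * Illustrative userspace sketch (not part of this file): marking an idle
 * buffer purgeable and later checking whether its pages survived. The fd and
 * handle are assumed; reupload_contents() is a hypothetical helper.
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,	// allow the shrinker to drop pages
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 *	madv.madv = I915_MADV_WILLNEED;		// reclaim before reusing the bo
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		reupload_contents(fd, handle);	// backing store was purged
 */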
3958
3959 static void
3960 frontbuffer_retire(struct i915_gem_active *active,
3961                    struct drm_i915_gem_request *request)
3962 {
3963         struct drm_i915_gem_object *obj =
3964                 container_of(active, typeof(*obj), frontbuffer_write);
3965
3966         intel_fb_obj_flush(obj, true, ORIGIN_CS);
3967 }
3968
3969 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3970                           const struct drm_i915_gem_object_ops *ops)
3971 {
3972         mutex_init(&obj->mm.lock);
3973
3974         INIT_LIST_HEAD(&obj->global_link);
3975         INIT_LIST_HEAD(&obj->userfault_link);
3976         INIT_LIST_HEAD(&obj->obj_exec_link);
3977         INIT_LIST_HEAD(&obj->vma_list);
3978         INIT_LIST_HEAD(&obj->batch_pool_link);
3979
3980         obj->ops = ops;
3981
3982         reservation_object_init(&obj->__builtin_resv);
3983         obj->resv = &obj->__builtin_resv;
3984
3985         obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
3986         init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
3987
3988         obj->mm.madv = I915_MADV_WILLNEED;
3989         INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
3990         mutex_init(&obj->mm.get_page.lock);
3991
3992         i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
3993 }
3994
3995 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3996         .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
3997                  I915_GEM_OBJECT_IS_SHRINKABLE,
3998         .get_pages = i915_gem_object_get_pages_gtt,
3999         .put_pages = i915_gem_object_put_pages_gtt,
4000 };
4001
4002 /* Note we don't consider signbits :| */
4003 #define overflows_type(x, T) \
4004         (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
4005
4006 struct drm_i915_gem_object *
4007 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
4008 {
4009         struct drm_i915_gem_object *obj;
4010         struct address_space *mapping;
4011         gfp_t mask;
4012         int ret;
4013
4014         /* There is a prevalence of the assumption that we fit the object's
4015          * page count inside a 32bit _signed_ variable. Let's document this and
4016          * catch if we ever need to fix it. In the meantime, if you do spot
4017          * such a local variable, please consider fixing!
4018          */
4019         if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
4020                 return ERR_PTR(-E2BIG);
4021
4022         if (overflows_type(size, obj->base.size))
4023                 return ERR_PTR(-E2BIG);
4024
4025         obj = i915_gem_object_alloc(dev_priv);
4026         if (obj == NULL)
4027                 return ERR_PTR(-ENOMEM);
4028
4029         ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size);
4030         if (ret)
4031                 goto fail;
4032
4033         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4034         if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
4035                 /* 965gm cannot relocate objects above 4GiB. */
4036                 mask &= ~__GFP_HIGHMEM;
4037                 mask |= __GFP_DMA32;
4038         }
4039
4040         mapping = obj->base.filp->f_mapping;
4041         mapping_set_gfp_mask(mapping, mask);
4042
4043         i915_gem_object_init(obj, &i915_gem_object_ops);
4044
4045         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4046         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4047
4048         if (HAS_LLC(dev_priv)) {
4049                 /* On some devices, we can have the GPU use the LLC (the CPU
4050                  * cache) for about a 10% performance improvement
4051                  * compared to uncached.  Graphics requests other than
4052                  * display scanout are coherent with the CPU in
4053                  * accessing this cache.  This means in this mode we
4054                  * don't need to clflush on the CPU side, and on the
4055                  * GPU side we only need to flush internal caches to
4056                  * get data visible to the CPU.
4057                  *
4058                  * However, we maintain the display planes as UC, and so
4059                  * need to rebind when first used as such.
4060                  */
4061                 obj->cache_level = I915_CACHE_LLC;
4062         } else
4063                 obj->cache_level = I915_CACHE_NONE;
4064
4065         trace_i915_gem_object_create(obj);
4066
4067         return obj;
4068
4069 fail:
4070         i915_gem_object_free(obj);
4071         return ERR_PTR(ret);
4072 }
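/*
 * Illustrative kernel-side sketch (not part of this file): creating a
 * shmemfs-backed object with the helper above. The two-page size is just an
 * example; callers must handle the ERR_PTR-encoded failure and drop their
 * reference when done.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create(dev_priv, 2 * PAGE_SIZE);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...
 *	i915_gem_object_put(obj);
 */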
4073
4074 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4075 {
4076         /* If we are the last user of the backing storage (be it shmemfs
4077          * pages or stolen etc), we know that the pages are going to be
4078          * immediately released. In this case, we can then skip copying
4079          * back the contents from the GPU.
4080          */
4081
4082         if (obj->mm.madv != I915_MADV_WILLNEED)
4083                 return false;
4084
4085         if (obj->base.filp == NULL)
4086                 return true;
4087
4088         /* At first glance, this looks racy, but then again so would be
4089          * userspace racing mmap against close. However, the first external
4090          * reference to the filp can only be obtained through the
4091          * i915_gem_mmap_ioctl() which safeguards us against the user
4092          * acquiring such a reference whilst we are in the middle of
4093          * freeing the object.
4094          */
4095         return atomic_long_read(&obj->base.filp->f_count) == 1;
4096 }
4097
4098 static void __i915_gem_free_objects(struct drm_i915_private *i915,
4099                                     struct llist_node *freed)
4100 {
4101         struct drm_i915_gem_object *obj, *on;
4102
4103         mutex_lock(&i915->drm.struct_mutex);
4104         intel_runtime_pm_get(i915);
4105         llist_for_each_entry(obj, freed, freed) {
4106                 struct i915_vma *vma, *vn;
4107
4108                 trace_i915_gem_object_destroy(obj);
4109
4110                 GEM_BUG_ON(i915_gem_object_is_active(obj));
4111                 list_for_each_entry_safe(vma, vn,
4112                                          &obj->vma_list, obj_link) {
4113                         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
4114                         GEM_BUG_ON(i915_vma_is_active(vma));
4115                         vma->flags &= ~I915_VMA_PIN_MASK;
4116                         i915_vma_close(vma);
4117                 }
4118                 GEM_BUG_ON(!list_empty(&obj->vma_list));
4119                 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
4120
4121                 list_del(&obj->global_link);
4122         }
4123         intel_runtime_pm_put(i915);
4124         mutex_unlock(&i915->drm.struct_mutex);
4125
4126         llist_for_each_entry_safe(obj, on, freed, freed) {
4127                 GEM_BUG_ON(obj->bind_count);
4128                 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
4129
4130                 if (obj->ops->release)
4131                         obj->ops->release(obj);
4132
4133                 if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
4134                         atomic_set(&obj->mm.pages_pin_count, 0);
4135                 __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
4136                 GEM_BUG_ON(obj->mm.pages);
4137
4138                 if (obj->base.import_attach)
4139                         drm_prime_gem_destroy(&obj->base, NULL);
4140
4141                 reservation_object_fini(&obj->__builtin_resv);
4142                 drm_gem_object_release(&obj->base);
4143                 i915_gem_info_remove_obj(i915, obj->base.size);
4144
4145                 kfree(obj->bit_17);
4146                 i915_gem_object_free(obj);
4147         }
4148 }
4149
4150 static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
4151 {
4152         struct llist_node *freed;
4153
4154         freed = llist_del_all(&i915->mm.free_list);
4155         if (unlikely(freed))
4156                 __i915_gem_free_objects(i915, freed);
4157 }
4158
4159 static void __i915_gem_free_work(struct work_struct *work)
4160 {
4161         struct drm_i915_private *i915 =
4162                 container_of(work, struct drm_i915_private, mm.free_work);
4163         struct llist_node *freed;
4164
4165         /* All file-owned VMA should have been released by this point through
4166          * i915_gem_close_object(), or earlier by i915_gem_context_close().
4167          * However, the object may also be bound into the global GTT (e.g.
4168          * older GPUs without per-process support, or for direct access through
4169          * the GTT either for the user or for scanout). Those VMA still need to
4170          * unbound now.
4171          */
4172
4173         while ((freed = llist_del_all(&i915->mm.free_list)))
4174                 __i915_gem_free_objects(i915, freed);
4175 }
4176
4177 static void __i915_gem_free_object_rcu(struct rcu_head *head)
4178 {
4179         struct drm_i915_gem_object *obj =
4180                 container_of(head, typeof(*obj), rcu);
4181         struct drm_i915_private *i915 = to_i915(obj->base.dev);
4182
4183         /* We can't simply use call_rcu() from i915_gem_free_object()
4184          * as we need to block whilst unbinding, and the call_rcu
4185          * task may be called from softirq context. So we take a
4186          * detour through a worker.
4187          */
4188         if (llist_add(&obj->freed, &i915->mm.free_list))
4189                 schedule_work(&i915->mm.free_work);
4190 }
4191
4192 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4193 {
4194         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4195
4196         if (obj->mm.quirked)
4197                 __i915_gem_object_unpin_pages(obj);
4198
4199         if (discard_backing_storage(obj))
4200                 obj->mm.madv = I915_MADV_DONTNEED;
4201
4202         /* Before we free the object, make sure any pure RCU-only
4203          * read-side critical sections are complete, e.g.
4204          * i915_gem_busy_ioctl(). For the corresponding synchronized
4205          * lookup see i915_gem_object_lookup_rcu().
4206          */
4207         call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
4208 }
4209
4210 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
4211 {
4212         lockdep_assert_held(&obj->base.dev->struct_mutex);
4213
4214         GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
4215         if (i915_gem_object_is_active(obj))
4216                 i915_gem_object_set_active_reference(obj);
4217         else
4218                 i915_gem_object_put(obj);
4219 }
4220
4221 static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
4222 {
4223         struct intel_engine_cs *engine;
4224         enum intel_engine_id id;
4225
4226         for_each_engine(engine, dev_priv, id)
4227                 GEM_BUG_ON(engine->last_retired_context != dev_priv->kernel_context);
4228 }
4229
4230 int i915_gem_suspend(struct drm_i915_private *dev_priv)
4231 {
4232         struct drm_device *dev = &dev_priv->drm;
4233         int ret;
4234
4235         intel_suspend_gt_powersave(dev_priv);
4236
4237         mutex_lock(&dev->struct_mutex);
4238
4239         /* We have to flush all the executing contexts to main memory so
4240          * that they can be saved in the hibernation image. To ensure the last
4241          * context image is coherent, we have to switch away from it. That
4242          * leaves the dev_priv->kernel_context still active when
4243          * we actually suspend, and its image in memory may not match the GPU
4244          * state. Fortunately, the kernel_context is disposable and we do
4245          * not rely on its state.
4246          */
4247         ret = i915_gem_switch_to_kernel_context(dev_priv);
4248         if (ret)
4249                 goto err;
4250
4251         ret = i915_gem_wait_for_idle(dev_priv,
4252                                      I915_WAIT_INTERRUPTIBLE |
4253                                      I915_WAIT_LOCKED);
4254         if (ret)
4255                 goto err;
4256
4257         i915_gem_retire_requests(dev_priv);
4258         GEM_BUG_ON(dev_priv->gt.active_requests);
4259
4260         assert_kernel_context_is_current(dev_priv);
4261         i915_gem_context_lost(dev_priv);
4262         mutex_unlock(&dev->struct_mutex);
4263
4264         cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4265         cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4266         flush_delayed_work(&dev_priv->gt.idle_work);
4267         flush_work(&dev_priv->mm.free_work);
4268
4269         /* Assert that we successfully flushed all the work and
4270          * reset the GPU back to its idle, low power state.
4271          */
4272         WARN_ON(dev_priv->gt.awake);
4273         WARN_ON(!intel_execlists_idle(dev_priv));
4274
4275         /*
4276          * Neither the BIOS, ourselves nor any other kernel
4277          * expects the system to be in execlists mode on startup,
4278          * so we need to reset the GPU back to legacy mode. And the only
4279          * known way to disable logical contexts is through a GPU reset.
4280          *
4281          * So in order to leave the system in a known default configuration,
4282          * always reset the GPU upon unload and suspend. Afterwards we then
4283          * clean up the GEM state tracking, flushing off the requests and
4284          * leaving the system in a known idle state.
4285          *
4286          * Note that it is of the utmost importance that the GPU is idle and
4287          * all stray writes are flushed *before* we dismantle the backing
4288          * storage for the pinned objects.
4289          *
4290          * However, since we are uncertain that resetting the GPU on older
4291          * machines is a good idea, we don't - just in case it leaves the
4292          * machine in an unusable condition.
4293          */
4294         if (HAS_HW_CONTEXTS(dev_priv)) {
4295                 int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
4296                 WARN_ON(reset && reset != -ENODEV);
4297         }
4298
4299         return 0;
4300
4301 err:
4302         mutex_unlock(&dev->struct_mutex);
4303         return ret;
4304 }
4305
4306 void i915_gem_resume(struct drm_i915_private *dev_priv)
4307 {
4308         struct drm_device *dev = &dev_priv->drm;
4309
4310         WARN_ON(dev_priv->gt.awake);
4311
4312         mutex_lock(&dev->struct_mutex);
4313         i915_gem_restore_gtt_mappings(dev_priv);
4314
4315         /* As we didn't flush the kernel context before suspend, we cannot
4316          * guarantee that the context image is complete. So let's just reset
4317          * it and start again.
4318          */
4319         dev_priv->gt.resume(dev_priv);
4320
4321         mutex_unlock(&dev->struct_mutex);
4322 }
4323
4324 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
4325 {
4326         if (INTEL_GEN(dev_priv) < 5 ||
4327             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4328                 return;
4329
4330         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4331                                  DISP_TILE_SURFACE_SWIZZLING);
4332
4333         if (IS_GEN5(dev_priv))
4334                 return;
4335
4336         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4337         if (IS_GEN6(dev_priv))
4338                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4339         else if (IS_GEN7(dev_priv))
4340                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4341         else if (IS_GEN8(dev_priv))
4342                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4343         else
4344                 BUG();
4345 }
4346
4347 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
4348 {
4349         I915_WRITE(RING_CTL(base), 0);
4350         I915_WRITE(RING_HEAD(base), 0);
4351         I915_WRITE(RING_TAIL(base), 0);
4352         I915_WRITE(RING_START(base), 0);
4353 }
4354
4355 static void init_unused_rings(struct drm_i915_private *dev_priv)
4356 {
4357         if (IS_I830(dev_priv)) {
4358                 init_unused_ring(dev_priv, PRB1_BASE);
4359                 init_unused_ring(dev_priv, SRB0_BASE);
4360                 init_unused_ring(dev_priv, SRB1_BASE);
4361                 init_unused_ring(dev_priv, SRB2_BASE);
4362                 init_unused_ring(dev_priv, SRB3_BASE);
4363         } else if (IS_GEN2(dev_priv)) {
4364                 init_unused_ring(dev_priv, SRB0_BASE);
4365                 init_unused_ring(dev_priv, SRB1_BASE);
4366         } else if (IS_GEN3(dev_priv)) {
4367                 init_unused_ring(dev_priv, PRB1_BASE);
4368                 init_unused_ring(dev_priv, PRB2_BASE);
4369         }
4370 }
4371
4372 int
4373 i915_gem_init_hw(struct drm_i915_private *dev_priv)
4374 {
4375         struct intel_engine_cs *engine;
4376         enum intel_engine_id id;
4377         int ret;
4378
4379         dev_priv->gt.last_init_time = ktime_get();
4380
4381         /* Double layer security blanket, see i915_gem_init() */
4382         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4383
4384         if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
4385                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4386
4387         if (IS_HASWELL(dev_priv))
4388                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
4389                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4390
4391         if (HAS_PCH_NOP(dev_priv)) {
4392                 if (IS_IVYBRIDGE(dev_priv)) {
4393                         u32 temp = I915_READ(GEN7_MSG_CTL);
4394                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4395                         I915_WRITE(GEN7_MSG_CTL, temp);
4396                 } else if (INTEL_GEN(dev_priv) >= 7) {
4397                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4398                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4399                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4400                 }
4401         }
4402
4403         i915_gem_init_swizzling(dev_priv);
4404
4405         /*
4406          * At least 830 can leave some of the unused rings
4407          * "active" (i.e. head != tail) after resume, which
4408          * will prevent C3 entry. Make sure all unused rings
4409          * are totally idle.
4410          */
4411         init_unused_rings(dev_priv);
4412
4413         BUG_ON(!dev_priv->kernel_context);
4414
4415         ret = i915_ppgtt_init_hw(dev_priv);
4416         if (ret) {
4417                 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4418                 goto out;
4419         }
4420
4421         /* Need to do basic initialisation of all rings first: */
4422         for_each_engine(engine, dev_priv, id) {
4423                 ret = engine->init_hw(engine);
4424                 if (ret)
4425                         goto out;
4426         }
4427
4428         intel_mocs_init_l3cc_table(dev_priv);
4429
4430         /* We can't enable contexts until all firmware is loaded */
4431         ret = intel_guc_setup(dev_priv);
4432         if (ret)
4433                 goto out;
4434
4435 out:
4436         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4437         return ret;
4438 }
4439
4440 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4441 {
4442         if (INTEL_INFO(dev_priv)->gen < 6)
4443                 return false;
4444
4445         /* TODO: make semaphores and Execlists play nicely together */
4446         if (i915.enable_execlists)
4447                 return false;
4448
4449         if (value >= 0)
4450                 return value;
4451
4452 #ifdef CONFIG_INTEL_IOMMU
4453         /* Enable semaphores on SNB when IO remapping is off */
4454         if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
4455                 return false;
4456 #endif
4457
4458         return true;
4459 }
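
     /*
      * Illustrative sketch, not part of this file: the helper above is meant
      * to be called once at driver load to sanitize the i915.semaphores
      * module parameter before any engine setup. The example call site and
      * function name below are assumptions, shown only to make the intended
      * use concrete.
      */
     #if 0
     static void example_sanitize_options(struct drm_i915_private *dev_priv)
     {
             i915.semaphores = intel_sanitize_semaphores(dev_priv,
                                                         i915.semaphores);
     }
     #endif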
4460
4461 int i915_gem_init(struct drm_i915_private *dev_priv)
4462 {
4463         int ret;
4464
4465         mutex_lock(&dev_priv->drm.struct_mutex);
4466
4467         if (!i915.enable_execlists) {
4468                 dev_priv->gt.resume = intel_legacy_submission_resume;
4469                 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
4470         } else {
4471                 dev_priv->gt.resume = intel_lr_context_resume;
4472                 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4473         }
4474
4475         /* This is just a security blanket to placate dragons.
4476          * On some systems, we very sporadically observe that the first TLBs
4477          * used by the CS may be stale, despite us poking the TLB reset. If
4478          * we hold the forcewake during initialisation these problems
4479          * just magically go away.
4480          */
4481         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4482
4483         i915_gem_init_userptr(dev_priv);
4484
4485         ret = i915_gem_init_ggtt(dev_priv);
4486         if (ret)
4487                 goto out_unlock;
4488
4489         ret = i915_gem_context_init(dev_priv);
4490         if (ret)
4491                 goto out_unlock;
4492
4493         ret = intel_engines_init(dev_priv);
4494         if (ret)
4495                 goto out_unlock;
4496
4497         ret = i915_gem_init_hw(dev_priv);
4498         if (ret == -EIO) {
4499                 /* Allow engine initialisation to fail by marking the GPU as
4500                  * wedged. But we only want to do this when the GPU is angry;
4501                  * for any other failure, such as an allocation failure, we bail.
4502                  */
4503                 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4504                 i915_gem_set_wedged(dev_priv);
4505                 ret = 0;
4506         }
4507
4508 out_unlock:
4509         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4510         mutex_unlock(&dev_priv->drm.struct_mutex);
4511
4512         return ret;
4513 }
4514
4515 void
4516 i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
4517 {
4518         struct intel_engine_cs *engine;
4519         enum intel_engine_id id;
4520
4521         for_each_engine(engine, dev_priv, id)
4522                 dev_priv->gt.cleanup_engine(engine);
4523 }
4524
4525 void
4526 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
4527 {
4528         int i;
4529
4530         if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
4531             !IS_CHERRYVIEW(dev_priv))
4532                 dev_priv->num_fence_regs = 32;
4533         else if (INTEL_INFO(dev_priv)->gen >= 4 ||
4534                  IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
4535                  IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
4536                 dev_priv->num_fence_regs = 16;
4537         else
4538                 dev_priv->num_fence_regs = 8;
4539
4540         if (intel_vgpu_active(dev_priv))
4541                 dev_priv->num_fence_regs =
4542                                 I915_READ(vgtif_reg(avail_rs.fence_num));
4543
4544         /* Initialize fence registers to zero */
4545         for (i = 0; i < dev_priv->num_fence_regs; i++) {
4546                 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
4547
4548                 fence->i915 = dev_priv;
4549                 fence->id = i;
4550                 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
4551         }
4552         i915_gem_restore_fences(dev_priv);
4553
4554         i915_gem_detect_bit_6_swizzle(dev_priv);
4555 }
4556
4557 int
4558 i915_gem_load_init(struct drm_i915_private *dev_priv)
4559 {
4560         int err = -ENOMEM;
4561
4562         dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
4563         if (!dev_priv->objects)
4564                 goto err_out;
4565
4566         dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
4567         if (!dev_priv->vmas)
4568                 goto err_objects;
4569
4570         dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
4571                                         SLAB_HWCACHE_ALIGN |
4572                                         SLAB_RECLAIM_ACCOUNT |
4573                                         SLAB_DESTROY_BY_RCU);
4574         if (!dev_priv->requests)
4575                 goto err_vmas;
4576
4577         dev_priv->dependencies = KMEM_CACHE(i915_dependency,
4578                                             SLAB_HWCACHE_ALIGN |
4579                                             SLAB_RECLAIM_ACCOUNT);
4580         if (!dev_priv->dependencies)
4581                 goto err_requests;
4582
4583         mutex_lock(&dev_priv->drm.struct_mutex);
4584         INIT_LIST_HEAD(&dev_priv->gt.timelines);
4585         err = i915_gem_timeline_init__global(dev_priv);
4586         mutex_unlock(&dev_priv->drm.struct_mutex);
4587         if (err)
4588                 goto err_dependencies;
4589
4590         INIT_LIST_HEAD(&dev_priv->context_list);
4591         INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
4592         init_llist_head(&dev_priv->mm.free_list);
4593         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4594         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4595         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4596         INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
4597         INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
4598                           i915_gem_retire_work_handler);
4599         INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
4600                           i915_gem_idle_work_handler);
4601         init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
4602         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4603
4604         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4605
4606         init_waitqueue_head(&dev_priv->pending_flip_queue);
4607
4608         dev_priv->mm.interruptible = true;
4609
4610         atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
4611
4612         spin_lock_init(&dev_priv->fb_tracking.lock);
4613
4614         return 0;
4615
4616 err_dependencies:
4617         kmem_cache_destroy(dev_priv->dependencies);
4618 err_requests:
4619         kmem_cache_destroy(dev_priv->requests);
4620 err_vmas:
4621         kmem_cache_destroy(dev_priv->vmas);
4622 err_objects:
4623         kmem_cache_destroy(dev_priv->objects);
4624 err_out:
4625         return err;
4626 }
4627
4628 void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
4629 {
4630         WARN_ON(!llist_empty(&dev_priv->mm.free_list));
4631
4632         mutex_lock(&dev_priv->drm.struct_mutex);
4633         i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
4634         WARN_ON(!list_empty(&dev_priv->gt.timelines));
4635         mutex_unlock(&dev_priv->drm.struct_mutex);
4636
4637         kmem_cache_destroy(dev_priv->dependencies);
4638         kmem_cache_destroy(dev_priv->requests);
4639         kmem_cache_destroy(dev_priv->vmas);
4640         kmem_cache_destroy(dev_priv->objects);
4641
4642         /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
4643         rcu_barrier();
4644 }
4645
4646 int i915_gem_freeze(struct drm_i915_private *dev_priv)
4647 {
4648         intel_runtime_pm_get(dev_priv);
4649
4650         mutex_lock(&dev_priv->drm.struct_mutex);
4651         i915_gem_shrink_all(dev_priv);
4652         mutex_unlock(&dev_priv->drm.struct_mutex);
4653
4654         intel_runtime_pm_put(dev_priv);
4655
4656         return 0;
4657 }
4658
4659 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
4660 {
4661         struct drm_i915_gem_object *obj;
4662         struct list_head *phases[] = {
4663                 &dev_priv->mm.unbound_list,
4664                 &dev_priv->mm.bound_list,
4665                 NULL
4666         }, **p;
4667
4668         /* Called just before we write the hibernation image.
4669          *
4670          * We need to update the domain tracking to reflect that the CPU
4671          * will be accessing all the pages to create and restore from the
4672          * hibernation, and so upon restoration those pages will be in the
4673          * CPU domain.
4674          *
4675          * To make sure the hibernation image contains the latest state,
4676          * we update that state just before writing out the image.
4677          *
4678          * To try and reduce the hibernation image, we manually shrink
4679          * the objects as well.
4680          */
4681
4682         mutex_lock(&dev_priv->drm.struct_mutex);
4683         i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
4684
4685         for (p = phases; *p; p++) {
4686                 list_for_each_entry(obj, *p, global_link) {
4687                         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4688                         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4689                 }
4690         }
4691         mutex_unlock(&dev_priv->drm.struct_mutex);
4692
4693         return 0;
4694 }
4695
4696 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4697 {
4698         struct drm_i915_file_private *file_priv = file->driver_priv;
4699         struct drm_i915_gem_request *request;
4700
4701         /* Clean up our request list when the client is going away, so that
4702          * later retire_requests won't dereference our soon-to-be-gone
4703          * file_priv.
4704          */
4705         spin_lock(&file_priv->mm.lock);
4706         list_for_each_entry(request, &file_priv->mm.request_list, client_list)
4707                 request->file_priv = NULL;
4708         spin_unlock(&file_priv->mm.lock);
4709
4710         if (!list_empty(&file_priv->rps.link)) {
4711                 spin_lock(&to_i915(dev)->rps.client_lock);
4712                 list_del(&file_priv->rps.link);
4713                 spin_unlock(&to_i915(dev)->rps.client_lock);
4714         }
4715 }
4716
4717 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4718 {
4719         struct drm_i915_file_private *file_priv;
4720         int ret;
4721
4722         DRM_DEBUG("\n");
4723
4724         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4725         if (!file_priv)
4726                 return -ENOMEM;
4727
4728         file->driver_priv = file_priv;
4729         file_priv->dev_priv = to_i915(dev);
4730         file_priv->file = file;
4731         INIT_LIST_HEAD(&file_priv->rps.link);
4732
4733         spin_lock_init(&file_priv->mm.lock);
4734         INIT_LIST_HEAD(&file_priv->mm.request_list);
4735
4736         file_priv->bsd_engine = -1;
4737
4738         ret = i915_gem_context_open(dev, file);
4739         if (ret)
4740                 kfree(file_priv);
4741
4742         return ret;
4743 }
4744
4745 /**
4746  * i915_gem_track_fb - update frontbuffer tracking
4747  * @old: current GEM buffer for the frontbuffer slots
4748  * @new: new GEM buffer for the frontbuffer slots
4749  * @frontbuffer_bits: bitmask of frontbuffer slots
4750  *
4751  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
4752  * from @old and setting them in @new. Both @old and @new can be NULL.
4753  */
4754 void i915_gem_track_fb(struct drm_i915_gem_object *old,
4755                        struct drm_i915_gem_object *new,
4756                        unsigned frontbuffer_bits)
4757 {
4758         /* Control of individual bits within the mask is guarded by
4759          * the owning plane->mutex, i.e. we can never see concurrent
4760          * manipulation of individual bits. But since the bitfield as a whole
4761          * is updated using RMW, we need to use atomics in order to update
4762          * the bits.
4763          */
4764         BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
4765                      sizeof(atomic_t) * BITS_PER_BYTE);
4766
4767         if (old) {
4768                 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
4769                 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
4770         }
4771
4772         if (new) {
4773                 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
4774                 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
4775         }
4776 }
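
     /*
      * Illustrative sketch, hypothetical caller: on a flip the plane code
      * hands its frontbuffer bits from the outgoing object to the incoming
      * one. The INTEL_FRONTBUFFER_PRIMARY() mask is used here only as an
      * example of a valid @frontbuffer_bits value.
      */
     #if 0
     static void example_flip_frontbuffer(struct drm_i915_gem_object *old_obj,
                                          struct drm_i915_gem_object *new_obj,
                                          enum pipe pipe)
     {
             i915_gem_track_fb(old_obj, new_obj,
                               INTEL_FRONTBUFFER_PRIMARY(pipe));
     }
     #endif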
4777
4778 /* Allocate a new GEM object and fill it with the supplied data */
4779 struct drm_i915_gem_object *
4780 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
4781                                  const void *data, size_t size)
4782 {
4783         struct drm_i915_gem_object *obj;
4784         struct sg_table *sg;
4785         size_t bytes;
4786         int ret;
4787
4788         obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
4789         if (IS_ERR(obj))
4790                 return obj;
4791
4792         ret = i915_gem_object_set_to_cpu_domain(obj, true);
4793         if (ret)
4794                 goto fail;
4795
4796         ret = i915_gem_object_pin_pages(obj);
4797         if (ret)
4798                 goto fail;
4799
4800         sg = obj->mm.pages;
4801         bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
4802         obj->mm.dirty = true; /* Backing store is now out of date */
4803         i915_gem_object_unpin_pages(obj);
4804
4805         if (WARN_ON(bytes != size)) {
4806                 DRM_ERROR("Incomplete copy, wrote %zu of %zu\n", bytes, size);
4807                 ret = -EFAULT;
4808                 goto fail;
4809         }
4810
4811         return obj;
4812
4813 fail:
4814         i915_gem_object_put(obj);
4815         return ERR_PTR(ret);
4816 }
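
     /*
      * Illustrative sketch, hypothetical caller: wrapping a firmware blob in
      * a GEM object so the hardware can read it, e.g. ahead of a DMA upload.
      * The struct firmware plumbing is an assumption for the example; errors
      * follow the ERR_PTR() convention used above.
      */
     #if 0
     static struct drm_i915_gem_object *
     example_wrap_firmware(struct drm_i915_private *dev_priv,
                           const struct firmware *fw)
     {
             return i915_gem_object_create_from_data(dev_priv,
                                                     fw->data, fw->size);
     }
     #endif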
4817
4818 struct scatterlist *
4819 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
4820                        unsigned int n,
4821                        unsigned int *offset)
4822 {
4823         struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
4824         struct scatterlist *sg;
4825         unsigned int idx, count;
4826
4827         might_sleep();
4828         GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
4829         GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
4830
4831         /* As we iterate forward through the sg, we record each entry in a
4832          * radixtree for quick repeated (backwards) lookups. If we have seen
4833          * this index previously, we will have an entry for it.
4834          *
4835          * Initial lookup is O(N), but this is amortized to O(1) for
4836          * sequential page access (where each new request is consecutive
4837          * to the previous one). Repeated lookups are O(lg(obj->base.size)),
4838          * i.e. O(1) with a large constant!
4839          */
4840         if (n < READ_ONCE(iter->sg_idx))
4841                 goto lookup;
4842
4843         mutex_lock(&iter->lock);
4844
4845         /* We prefer to reuse the last sg so that repeated lookups of this
4846          * (or the subsequent) sg are fast - comparing against the last
4847          * sg is faster than going through the radixtree.
4848          */
4849
4850         sg = iter->sg_pos;
4851         idx = iter->sg_idx;
4852         count = __sg_page_count(sg);
4853
4854         while (idx + count <= n) {
4855                 unsigned long exception, i;
4856                 int ret;
4857
4858                 /* If we cannot allocate and insert this entry, or the
4859                  * individual pages from this range, cancel updating the
4860                  * sg_idx so that on this lookup we are forced to linearly
4861                  * scan onwards, but on future lookups we will try the
4862                  * insertion again (in which case we need to be careful of
4863                  * the error return reporting that we have already inserted
4864                  * this index).
4865                  */
4866                 ret = radix_tree_insert(&iter->radix, idx, sg);
4867                 if (ret && ret != -EEXIST)
4868                         goto scan;
4869
4870                 exception =
4871                         RADIX_TREE_EXCEPTIONAL_ENTRY |
4872                         idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
4873                 for (i = 1; i < count; i++) {
4874                         ret = radix_tree_insert(&iter->radix, idx + i,
4875                                                 (void *)exception);
4876                         if (ret && ret != -EEXIST)
4877                                 goto scan;
4878                 }
4879
4880                 idx += count;
4881                 sg = ____sg_next(sg);
4882                 count = __sg_page_count(sg);
4883         }
4884
4885 scan:
4886         iter->sg_pos = sg;
4887         iter->sg_idx = idx;
4888
4889         mutex_unlock(&iter->lock);
4890
4891         if (unlikely(n < idx)) /* insertion completed by another thread */
4892                 goto lookup;
4893
4894         /* In case we failed to insert the entry into the radixtree, we need
4895          * to look beyond the current sg.
4896          */
4897         while (idx + count <= n) {
4898                 idx += count;
4899                 sg = ____sg_next(sg);
4900                 count = __sg_page_count(sg);
4901         }
4902
4903         *offset = n - idx;
4904         return sg;
4905
4906 lookup:
4907         rcu_read_lock();
4908
4909         sg = radix_tree_lookup(&iter->radix, n);
4910         GEM_BUG_ON(!sg);
4911
4912         /* If this index is in the middle of a multi-page sg entry,
4913          * the radixtree will contain an exceptional entry that points
4914          * to the start of that range. We will return the pointer to
4915          * the base page and the offset of this page within the
4916          * sg entry's range.
4917          */
4918         *offset = 0;
4919         if (unlikely(radix_tree_exception(sg))) {
4920                 unsigned long base =
4921                         (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
4922
4923                 sg = radix_tree_lookup(&iter->radix, base);
4924                 GEM_BUG_ON(!sg);
4925
4926                 *offset = n - base;
4927         }
4928
4929         rcu_read_unlock();
4930
4931         return sg;
4932 }
4933
4934 struct page *
4935 i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
4936 {
4937         struct scatterlist *sg;
4938         unsigned int offset;
4939
4940         GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
4941
4942         sg = i915_gem_object_get_sg(obj, n, &offset);
4943         return nth_page(sg_page(sg), offset);
4944 }
4945
4946 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
4947 struct page *
4948 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
4949                                unsigned int n)
4950 {
4951         struct page *page;
4952
4953         page = i915_gem_object_get_page(obj, n);
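             /* If the object as a whole is already tracked as dirty, each
              * backing page is expected to be marked dirty again when the
              * pages are released, so the explicit set_page_dirty() below is
              * only needed while obj->mm.dirty is still clear.
              */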
4954         if (!obj->mm.dirty)
4955                 set_page_dirty(page);
4956
4957         return page;
4958 }
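
     /*
      * Illustrative sketch, hypothetical caller: copying one page of an
      * object's backing store through the page lookup helpers above. The
      * kmap()/kunmap() access and the caller itself are assumptions for the
      * example; the object's pages must be pinned around the access.
      */
     #if 0
     static void example_read_page(struct drm_i915_gem_object *obj,
                                   unsigned int n, void *dst)
     {
             struct page *page = i915_gem_object_get_page(obj, n);
             void *vaddr = kmap(page);

             memcpy(dst, vaddr, PAGE_SIZE);
             kunmap(page);
     }
     #endif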
4959
4960 dma_addr_t
4961 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
4962                                 unsigned long n)
4963 {
4964         struct scatterlist *sg;
4965         unsigned int offset;
4966
4967         sg = i915_gem_object_get_sg(obj, n, &offset);
4968         return sg_dma_address(sg) + (offset << PAGE_SHIFT);
4969 }