]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/gpu/drm/i915/i915_gem.c
Merge branch 'drm-intel-fixes' into drm-intel-next
[karo-tx-linux.git] / drivers / gpu / drm / i915 / i915_gem.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include "drmP.h"
29 #include "drm.h"
30 #include "i915_drm.h"
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/slab.h>
35 #include <linux/swap.h>
36 #include <linux/pci.h>
37 #include <linux/intel-gtt.h>
38
39 struct change_domains {
40         uint32_t invalidate_domains;
41         uint32_t flush_domains;
42         uint32_t flush_rings;
43 };
44
45 static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv);
46 static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv);
47
48 static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
49                                                   bool pipelined);
50 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
51 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
52 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
53                                              int write);
54 static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
55                                                      uint64_t offset,
56                                                      uint64_t size);
57 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
58 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
59                                           bool interruptible);
60 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
61                                        unsigned alignment,
62                                        bool map_and_fenceable);
63 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
64 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
65                                 struct drm_i915_gem_pwrite *args,
66                                 struct drm_file *file_priv);
67 static void i915_gem_free_object_tail(struct drm_gem_object *obj);
68
69 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
70                                     int nr_to_scan,
71                                     gfp_t gfp_mask);
72
73
74 /* some bookkeeping */
75 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
76                                   size_t size)
77 {
78         dev_priv->mm.object_count++;
79         dev_priv->mm.object_memory += size;
80 }
81
82 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
83                                      size_t size)
84 {
85         dev_priv->mm.object_count--;
86         dev_priv->mm.object_memory -= size;
87 }
88
89 static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
90                                   struct drm_i915_gem_object *obj)
91 {
92         dev_priv->mm.gtt_count++;
93         dev_priv->mm.gtt_memory += obj->gtt_space->size;
94         if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) {
95                 dev_priv->mm.mappable_gtt_used +=
96                         min_t(size_t, obj->gtt_space->size,
97                               dev_priv->mm.gtt_mappable_end - obj->gtt_offset);
98         }
99 }
100
101 static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
102                                      struct drm_i915_gem_object *obj)
103 {
104         dev_priv->mm.gtt_count--;
105         dev_priv->mm.gtt_memory -= obj->gtt_space->size;
106         if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) {
107                 dev_priv->mm.mappable_gtt_used -=
108                         min_t(size_t, obj->gtt_space->size,
109                               dev_priv->mm.gtt_mappable_end - obj->gtt_offset);
110         }
111 }
112
113 /**
114  * Update the mappable working set counters. Call _only_ when there is a change
115  * in one of (pin|fault)_mappable and update *_mappable _before_ calling.
116  * @mappable: new state the changed mappable flag (either pin_ or fault_).
117  */
118 static void
119 i915_gem_info_update_mappable(struct drm_i915_private *dev_priv,
120                               struct drm_i915_gem_object *obj,
121                               bool mappable)
122 {
123         if (mappable) {
124                 if (obj->pin_mappable && obj->fault_mappable)
125                         /* Combined state was already mappable. */
126                         return;
127                 dev_priv->mm.gtt_mappable_count++;
128                 dev_priv->mm.gtt_mappable_memory += obj->gtt_space->size;
129         } else {
130                 if (obj->pin_mappable || obj->fault_mappable)
131                         /* Combined state still mappable. */
132                         return;
133                 dev_priv->mm.gtt_mappable_count--;
134                 dev_priv->mm.gtt_mappable_memory -= obj->gtt_space->size;
135         }
136 }
137
138 static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
139                                   struct drm_i915_gem_object *obj,
140                                   bool mappable)
141 {
142         dev_priv->mm.pin_count++;
143         dev_priv->mm.pin_memory += obj->gtt_space->size;
144         if (mappable) {
145                 obj->pin_mappable = true;
146                 i915_gem_info_update_mappable(dev_priv, obj, true);
147         }
148 }
149
150 static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
151                                      struct drm_i915_gem_object *obj)
152 {
153         dev_priv->mm.pin_count--;
154         dev_priv->mm.pin_memory -= obj->gtt_space->size;
155         if (obj->pin_mappable) {
156                 obj->pin_mappable = false;
157                 i915_gem_info_update_mappable(dev_priv, obj, false);
158         }
159 }
160
161 int
162 i915_gem_check_is_wedged(struct drm_device *dev)
163 {
164         struct drm_i915_private *dev_priv = dev->dev_private;
165         struct completion *x = &dev_priv->error_completion;
166         unsigned long flags;
167         int ret;
168
169         if (!atomic_read(&dev_priv->mm.wedged))
170                 return 0;
171
172         ret = wait_for_completion_interruptible(x);
173         if (ret)
174                 return ret;
175
176         /* Success, we reset the GPU! */
177         if (!atomic_read(&dev_priv->mm.wedged))
178                 return 0;
179
180         /* GPU is hung, bump the completion count to account for
181          * the token we just consumed so that we never hit zero and
182          * end up waiting upon a subsequent completion event that
183          * will never happen.
184          */
185         spin_lock_irqsave(&x->wait.lock, flags);
186         x->done++;
187         spin_unlock_irqrestore(&x->wait.lock, flags);
188         return -EIO;
189 }
190
191 static int i915_mutex_lock_interruptible(struct drm_device *dev)
192 {
193         struct drm_i915_private *dev_priv = dev->dev_private;
194         int ret;
195
196         ret = i915_gem_check_is_wedged(dev);
197         if (ret)
198                 return ret;
199
200         ret = mutex_lock_interruptible(&dev->struct_mutex);
201         if (ret)
202                 return ret;
203
204         if (atomic_read(&dev_priv->mm.wedged)) {
205                 mutex_unlock(&dev->struct_mutex);
206                 return -EAGAIN;
207         }
208
209         WARN_ON(i915_verify_lists(dev));
210         return 0;
211 }
212
213 static inline bool
214 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
215 {
216         return obj_priv->gtt_space &&
217                 !obj_priv->active &&
218                 obj_priv->pin_count == 0;
219 }
220
221 int i915_gem_do_init(struct drm_device *dev,
222                      unsigned long start,
223                      unsigned long mappable_end,
224                      unsigned long end)
225 {
226         drm_i915_private_t *dev_priv = dev->dev_private;
227
228         if (start >= end ||
229             (start & (PAGE_SIZE - 1)) != 0 ||
230             (end & (PAGE_SIZE - 1)) != 0) {
231                 return -EINVAL;
232         }
233
234         drm_mm_init(&dev_priv->mm.gtt_space, start,
235                     end - start);
236
237         dev_priv->mm.gtt_total = end - start;
238         dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
239         dev_priv->mm.gtt_mappable_end = mappable_end;
240
241         return 0;
242 }
243
244 int
245 i915_gem_init_ioctl(struct drm_device *dev, void *data,
246                     struct drm_file *file_priv)
247 {
248         struct drm_i915_gem_init *args = data;
249         int ret;
250
251         mutex_lock(&dev->struct_mutex);
252         ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
253         mutex_unlock(&dev->struct_mutex);
254
255         return ret;
256 }
257
258 int
259 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
260                             struct drm_file *file_priv)
261 {
262         struct drm_i915_private *dev_priv = dev->dev_private;
263         struct drm_i915_gem_get_aperture *args = data;
264
265         if (!(dev->driver->driver_features & DRIVER_GEM))
266                 return -ENODEV;
267
268         mutex_lock(&dev->struct_mutex);
269         args->aper_size = dev_priv->mm.gtt_total;
270         args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
271         mutex_unlock(&dev->struct_mutex);
272
273         return 0;
274 }
275
276
277 /**
278  * Creates a new mm object and returns a handle to it.
279  */
280 int
281 i915_gem_create_ioctl(struct drm_device *dev, void *data,
282                       struct drm_file *file_priv)
283 {
284         struct drm_i915_gem_create *args = data;
285         struct drm_gem_object *obj;
286         int ret;
287         u32 handle;
288
289         args->size = roundup(args->size, PAGE_SIZE);
290
291         /* Allocate the new object */
292         obj = i915_gem_alloc_object(dev, args->size);
293         if (obj == NULL)
294                 return -ENOMEM;
295
296         ret = drm_gem_handle_create(file_priv, obj, &handle);
297         if (ret) {
298                 drm_gem_object_release(obj);
299                 i915_gem_info_remove_obj(dev->dev_private, obj->size);
300                 kfree(obj);
301                 return ret;
302         }
303
304         /* drop reference from allocate - handle holds it now */
305         drm_gem_object_unreference(obj);
306         trace_i915_gem_object_create(obj);
307
308         args->handle = handle;
309         return 0;
310 }
311
312 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
313 {
314         drm_i915_private_t *dev_priv = obj->dev->dev_private;
315         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
316
317         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
318                 obj_priv->tiling_mode != I915_TILING_NONE;
319 }
320
321 static inline void
322 slow_shmem_copy(struct page *dst_page,
323                 int dst_offset,
324                 struct page *src_page,
325                 int src_offset,
326                 int length)
327 {
328         char *dst_vaddr, *src_vaddr;
329
330         dst_vaddr = kmap(dst_page);
331         src_vaddr = kmap(src_page);
332
333         memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
334
335         kunmap(src_page);
336         kunmap(dst_page);
337 }
338
339 static inline void
340 slow_shmem_bit17_copy(struct page *gpu_page,
341                       int gpu_offset,
342                       struct page *cpu_page,
343                       int cpu_offset,
344                       int length,
345                       int is_read)
346 {
347         char *gpu_vaddr, *cpu_vaddr;
348
349         /* Use the unswizzled path if this page isn't affected. */
350         if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
351                 if (is_read)
352                         return slow_shmem_copy(cpu_page, cpu_offset,
353                                                gpu_page, gpu_offset, length);
354                 else
355                         return slow_shmem_copy(gpu_page, gpu_offset,
356                                                cpu_page, cpu_offset, length);
357         }
358
359         gpu_vaddr = kmap(gpu_page);
360         cpu_vaddr = kmap(cpu_page);
361
362         /* Copy the data, XORing A6 with A17 (1). The user already knows he's
363          * XORing with the other bits (A9 for Y, A9 and A10 for X)
364          */
365         while (length > 0) {
366                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
367                 int this_length = min(cacheline_end - gpu_offset, length);
368                 int swizzled_gpu_offset = gpu_offset ^ 64;
369
370                 if (is_read) {
371                         memcpy(cpu_vaddr + cpu_offset,
372                                gpu_vaddr + swizzled_gpu_offset,
373                                this_length);
374                 } else {
375                         memcpy(gpu_vaddr + swizzled_gpu_offset,
376                                cpu_vaddr + cpu_offset,
377                                this_length);
378                 }
379                 cpu_offset += this_length;
380                 gpu_offset += this_length;
381                 length -= this_length;
382         }
383
384         kunmap(cpu_page);
385         kunmap(gpu_page);
386 }
387
388 /**
389  * This is the fast shmem pread path, which attempts to copy_from_user directly
390  * from the backing pages of the object to the user's address space.  On a
391  * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
392  */
393 static int
394 i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
395                           struct drm_i915_gem_pread *args,
396                           struct drm_file *file_priv)
397 {
398         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
399         struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
400         ssize_t remain;
401         loff_t offset;
402         char __user *user_data;
403         int page_offset, page_length;
404
405         user_data = (char __user *) (uintptr_t) args->data_ptr;
406         remain = args->size;
407
408         obj_priv = to_intel_bo(obj);
409         offset = args->offset;
410
411         while (remain > 0) {
412                 struct page *page;
413                 char *vaddr;
414                 int ret;
415
416                 /* Operation in this page
417                  *
418                  * page_offset = offset within page
419                  * page_length = bytes to copy for this page
420                  */
421                 page_offset = offset & (PAGE_SIZE-1);
422                 page_length = remain;
423                 if ((page_offset + remain) > PAGE_SIZE)
424                         page_length = PAGE_SIZE - page_offset;
425
426                 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
427                                            GFP_HIGHUSER | __GFP_RECLAIMABLE);
428                 if (IS_ERR(page))
429                         return PTR_ERR(page);
430
431                 vaddr = kmap_atomic(page);
432                 ret = __copy_to_user_inatomic(user_data,
433                                               vaddr + page_offset,
434                                               page_length);
435                 kunmap_atomic(vaddr);
436
437                 mark_page_accessed(page);
438                 page_cache_release(page);
439                 if (ret)
440                         return -EFAULT;
441
442                 remain -= page_length;
443                 user_data += page_length;
444                 offset += page_length;
445         }
446
447         return 0;
448 }
449
450 /**
451  * This is the fallback shmem pread path, which allocates temporary storage
452  * in kernel space to copy_to_user into outside of the struct_mutex, so we
453  * can copy out of the object's backing pages while holding the struct mutex
454  * and not take page faults.
455  */
456 static int
457 i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
458                           struct drm_i915_gem_pread *args,
459                           struct drm_file *file_priv)
460 {
461         struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
462         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
463         struct mm_struct *mm = current->mm;
464         struct page **user_pages;
465         ssize_t remain;
466         loff_t offset, pinned_pages, i;
467         loff_t first_data_page, last_data_page, num_pages;
468         int shmem_page_offset;
469         int data_page_index, data_page_offset;
470         int page_length;
471         int ret;
472         uint64_t data_ptr = args->data_ptr;
473         int do_bit17_swizzling;
474
475         remain = args->size;
476
477         /* Pin the user pages containing the data.  We can't fault while
478          * holding the struct mutex, yet we want to hold it while
479          * dereferencing the user data.
480          */
481         first_data_page = data_ptr / PAGE_SIZE;
482         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
483         num_pages = last_data_page - first_data_page + 1;
484
485         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
486         if (user_pages == NULL)
487                 return -ENOMEM;
488
489         mutex_unlock(&dev->struct_mutex);
490         down_read(&mm->mmap_sem);
491         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
492                                       num_pages, 1, 0, user_pages, NULL);
493         up_read(&mm->mmap_sem);
494         mutex_lock(&dev->struct_mutex);
495         if (pinned_pages < num_pages) {
496                 ret = -EFAULT;
497                 goto out;
498         }
499
500         ret = i915_gem_object_set_cpu_read_domain_range(obj,
501                                                         args->offset,
502                                                         args->size);
503         if (ret)
504                 goto out;
505
506         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
507
508         obj_priv = to_intel_bo(obj);
509         offset = args->offset;
510
511         while (remain > 0) {
512                 struct page *page;
513
514                 /* Operation in this page
515                  *
516                  * shmem_page_offset = offset within page in shmem file
517                  * data_page_index = page number in get_user_pages return
518                  * data_page_offset = offset with data_page_index page.
519                  * page_length = bytes to copy for this page
520                  */
521                 shmem_page_offset = offset & ~PAGE_MASK;
522                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
523                 data_page_offset = data_ptr & ~PAGE_MASK;
524
525                 page_length = remain;
526                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
527                         page_length = PAGE_SIZE - shmem_page_offset;
528                 if ((data_page_offset + page_length) > PAGE_SIZE)
529                         page_length = PAGE_SIZE - data_page_offset;
530
531                 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
532                                            GFP_HIGHUSER | __GFP_RECLAIMABLE);
533                 if (IS_ERR(page))
534                         return PTR_ERR(page);
535
536                 if (do_bit17_swizzling) {
537                         slow_shmem_bit17_copy(page,
538                                               shmem_page_offset,
539                                               user_pages[data_page_index],
540                                               data_page_offset,
541                                               page_length,
542                                               1);
543                 } else {
544                         slow_shmem_copy(user_pages[data_page_index],
545                                         data_page_offset,
546                                         page,
547                                         shmem_page_offset,
548                                         page_length);
549                 }
550
551                 mark_page_accessed(page);
552                 page_cache_release(page);
553
554                 remain -= page_length;
555                 data_ptr += page_length;
556                 offset += page_length;
557         }
558
559 out:
560         for (i = 0; i < pinned_pages; i++) {
561                 SetPageDirty(user_pages[i]);
562                 mark_page_accessed(user_pages[i]);
563                 page_cache_release(user_pages[i]);
564         }
565         drm_free_large(user_pages);
566
567         return ret;
568 }
569
570 /**
571  * Reads data from the object referenced by handle.
572  *
573  * On error, the contents of *data are undefined.
574  */
575 int
576 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
577                      struct drm_file *file_priv)
578 {
579         struct drm_i915_gem_pread *args = data;
580         struct drm_gem_object *obj;
581         struct drm_i915_gem_object *obj_priv;
582         int ret = 0;
583
584         ret = i915_mutex_lock_interruptible(dev);
585         if (ret)
586                 return ret;
587
588         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
589         if (obj == NULL) {
590                 ret = -ENOENT;
591                 goto unlock;
592         }
593         obj_priv = to_intel_bo(obj);
594
595         /* Bounds check source.  */
596         if (args->offset > obj->size || args->size > obj->size - args->offset) {
597                 ret = -EINVAL;
598                 goto out;
599         }
600
601         if (args->size == 0)
602                 goto out;
603
604         if (!access_ok(VERIFY_WRITE,
605                        (char __user *)(uintptr_t)args->data_ptr,
606                        args->size)) {
607                 ret = -EFAULT;
608                 goto out;
609         }
610
611         ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
612                                        args->size);
613         if (ret) {
614                 ret = -EFAULT;
615                 goto out;
616         }
617
618         ret = i915_gem_object_set_cpu_read_domain_range(obj,
619                                                         args->offset,
620                                                         args->size);
621         if (ret)
622                 goto out;
623
624         ret = -EFAULT;
625         if (!i915_gem_object_needs_bit17_swizzle(obj))
626                 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
627         if (ret == -EFAULT)
628                 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
629
630 out:
631         drm_gem_object_unreference(obj);
632 unlock:
633         mutex_unlock(&dev->struct_mutex);
634         return ret;
635 }
636
637 /* This is the fast write path which cannot handle
638  * page faults in the source data
639  */
640
641 static inline int
642 fast_user_write(struct io_mapping *mapping,
643                 loff_t page_base, int page_offset,
644                 char __user *user_data,
645                 int length)
646 {
647         char *vaddr_atomic;
648         unsigned long unwritten;
649
650         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
651         unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
652                                                       user_data, length);
653         io_mapping_unmap_atomic(vaddr_atomic);
654         return unwritten;
655 }
656
657 /* Here's the write path which can sleep for
658  * page faults
659  */
660
661 static inline void
662 slow_kernel_write(struct io_mapping *mapping,
663                   loff_t gtt_base, int gtt_offset,
664                   struct page *user_page, int user_offset,
665                   int length)
666 {
667         char __iomem *dst_vaddr;
668         char *src_vaddr;
669
670         dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
671         src_vaddr = kmap(user_page);
672
673         memcpy_toio(dst_vaddr + gtt_offset,
674                     src_vaddr + user_offset,
675                     length);
676
677         kunmap(user_page);
678         io_mapping_unmap(dst_vaddr);
679 }
680
681 /**
682  * This is the fast pwrite path, where we copy the data directly from the
683  * user into the GTT, uncached.
684  */
685 static int
686 i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
687                          struct drm_i915_gem_pwrite *args,
688                          struct drm_file *file_priv)
689 {
690         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
691         drm_i915_private_t *dev_priv = dev->dev_private;
692         ssize_t remain;
693         loff_t offset, page_base;
694         char __user *user_data;
695         int page_offset, page_length;
696
697         user_data = (char __user *) (uintptr_t) args->data_ptr;
698         remain = args->size;
699
700         obj_priv = to_intel_bo(obj);
701         offset = obj_priv->gtt_offset + args->offset;
702
703         while (remain > 0) {
704                 /* Operation in this page
705                  *
706                  * page_base = page offset within aperture
707                  * page_offset = offset within page
708                  * page_length = bytes to copy for this page
709                  */
710                 page_base = (offset & ~(PAGE_SIZE-1));
711                 page_offset = offset & (PAGE_SIZE-1);
712                 page_length = remain;
713                 if ((page_offset + remain) > PAGE_SIZE)
714                         page_length = PAGE_SIZE - page_offset;
715
716                 /* If we get a fault while copying data, then (presumably) our
717                  * source page isn't available.  Return the error and we'll
718                  * retry in the slow path.
719                  */
720                 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
721                                     page_offset, user_data, page_length))
722
723                         return -EFAULT;
724
725                 remain -= page_length;
726                 user_data += page_length;
727                 offset += page_length;
728         }
729
730         return 0;
731 }
732
733 /**
734  * This is the fallback GTT pwrite path, which uses get_user_pages to pin
735  * the memory and maps it using kmap_atomic for copying.
736  *
737  * This code resulted in x11perf -rgb10text consuming about 10% more CPU
738  * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
739  */
740 static int
741 i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
742                          struct drm_i915_gem_pwrite *args,
743                          struct drm_file *file_priv)
744 {
745         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
746         drm_i915_private_t *dev_priv = dev->dev_private;
747         ssize_t remain;
748         loff_t gtt_page_base, offset;
749         loff_t first_data_page, last_data_page, num_pages;
750         loff_t pinned_pages, i;
751         struct page **user_pages;
752         struct mm_struct *mm = current->mm;
753         int gtt_page_offset, data_page_offset, data_page_index, page_length;
754         int ret;
755         uint64_t data_ptr = args->data_ptr;
756
757         remain = args->size;
758
759         /* Pin the user pages containing the data.  We can't fault while
760          * holding the struct mutex, and all of the pwrite implementations
761          * want to hold it while dereferencing the user data.
762          */
763         first_data_page = data_ptr / PAGE_SIZE;
764         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
765         num_pages = last_data_page - first_data_page + 1;
766
767         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
768         if (user_pages == NULL)
769                 return -ENOMEM;
770
771         mutex_unlock(&dev->struct_mutex);
772         down_read(&mm->mmap_sem);
773         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
774                                       num_pages, 0, 0, user_pages, NULL);
775         up_read(&mm->mmap_sem);
776         mutex_lock(&dev->struct_mutex);
777         if (pinned_pages < num_pages) {
778                 ret = -EFAULT;
779                 goto out_unpin_pages;
780         }
781
782         ret = i915_gem_object_set_to_gtt_domain(obj, 1);
783         if (ret)
784                 goto out_unpin_pages;
785
786         obj_priv = to_intel_bo(obj);
787         offset = obj_priv->gtt_offset + args->offset;
788
789         while (remain > 0) {
790                 /* Operation in this page
791                  *
792                  * gtt_page_base = page offset within aperture
793                  * gtt_page_offset = offset within page in aperture
794                  * data_page_index = page number in get_user_pages return
795                  * data_page_offset = offset with data_page_index page.
796                  * page_length = bytes to copy for this page
797                  */
798                 gtt_page_base = offset & PAGE_MASK;
799                 gtt_page_offset = offset & ~PAGE_MASK;
800                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
801                 data_page_offset = data_ptr & ~PAGE_MASK;
802
803                 page_length = remain;
804                 if ((gtt_page_offset + page_length) > PAGE_SIZE)
805                         page_length = PAGE_SIZE - gtt_page_offset;
806                 if ((data_page_offset + page_length) > PAGE_SIZE)
807                         page_length = PAGE_SIZE - data_page_offset;
808
809                 slow_kernel_write(dev_priv->mm.gtt_mapping,
810                                   gtt_page_base, gtt_page_offset,
811                                   user_pages[data_page_index],
812                                   data_page_offset,
813                                   page_length);
814
815                 remain -= page_length;
816                 offset += page_length;
817                 data_ptr += page_length;
818         }
819
820 out_unpin_pages:
821         for (i = 0; i < pinned_pages; i++)
822                 page_cache_release(user_pages[i]);
823         drm_free_large(user_pages);
824
825         return ret;
826 }
827
828 /**
829  * This is the fast shmem pwrite path, which attempts to directly
830  * copy_from_user into the kmapped pages backing the object.
831  */
832 static int
833 i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
834                            struct drm_i915_gem_pwrite *args,
835                            struct drm_file *file_priv)
836 {
837         struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
838         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
839         ssize_t remain;
840         loff_t offset;
841         char __user *user_data;
842         int page_offset, page_length;
843
844         user_data = (char __user *) (uintptr_t) args->data_ptr;
845         remain = args->size;
846
847         obj_priv = to_intel_bo(obj);
848         offset = args->offset;
849         obj_priv->dirty = 1;
850
851         while (remain > 0) {
852                 struct page *page;
853                 char *vaddr;
854                 int ret;
855
856                 /* Operation in this page
857                  *
858                  * page_offset = offset within page
859                  * page_length = bytes to copy for this page
860                  */
861                 page_offset = offset & (PAGE_SIZE-1);
862                 page_length = remain;
863                 if ((page_offset + remain) > PAGE_SIZE)
864                         page_length = PAGE_SIZE - page_offset;
865
866                 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
867                                            GFP_HIGHUSER | __GFP_RECLAIMABLE);
868                 if (IS_ERR(page))
869                         return PTR_ERR(page);
870
871                 vaddr = kmap_atomic(page, KM_USER0);
872                 ret = __copy_from_user_inatomic(vaddr + page_offset,
873                                                 user_data,
874                                                 page_length);
875                 kunmap_atomic(vaddr, KM_USER0);
876
877                 set_page_dirty(page);
878                 mark_page_accessed(page);
879                 page_cache_release(page);
880
881                 /* If we get a fault while copying data, then (presumably) our
882                  * source page isn't available.  Return the error and we'll
883                  * retry in the slow path.
884                  */
885                 if (ret)
886                         return -EFAULT;
887
888                 remain -= page_length;
889                 user_data += page_length;
890                 offset += page_length;
891         }
892
893         return 0;
894 }
895
896 /**
897  * This is the fallback shmem pwrite path, which uses get_user_pages to pin
898  * the memory and maps it using kmap_atomic for copying.
899  *
900  * This avoids taking mmap_sem for faulting on the user's address while the
901  * struct_mutex is held.
902  */
903 static int
904 i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
905                            struct drm_i915_gem_pwrite *args,
906                            struct drm_file *file_priv)
907 {
908         struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
909         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
910         struct mm_struct *mm = current->mm;
911         struct page **user_pages;
912         ssize_t remain;
913         loff_t offset, pinned_pages, i;
914         loff_t first_data_page, last_data_page, num_pages;
915         int shmem_page_offset;
916         int data_page_index,  data_page_offset;
917         int page_length;
918         int ret;
919         uint64_t data_ptr = args->data_ptr;
920         int do_bit17_swizzling;
921
922         remain = args->size;
923
924         /* Pin the user pages containing the data.  We can't fault while
925          * holding the struct mutex, and all of the pwrite implementations
926          * want to hold it while dereferencing the user data.
927          */
928         first_data_page = data_ptr / PAGE_SIZE;
929         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
930         num_pages = last_data_page - first_data_page + 1;
931
932         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
933         if (user_pages == NULL)
934                 return -ENOMEM;
935
936         mutex_unlock(&dev->struct_mutex);
937         down_read(&mm->mmap_sem);
938         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
939                                       num_pages, 0, 0, user_pages, NULL);
940         up_read(&mm->mmap_sem);
941         mutex_lock(&dev->struct_mutex);
942         if (pinned_pages < num_pages) {
943                 ret = -EFAULT;
944                 goto out;
945         }
946
947         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
948         if (ret)
949                 goto out;
950
951         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
952
953         obj_priv = to_intel_bo(obj);
954         offset = args->offset;
955         obj_priv->dirty = 1;
956
957         while (remain > 0) {
958                 struct page *page;
959
960                 /* Operation in this page
961                  *
962                  * shmem_page_offset = offset within page in shmem file
963                  * data_page_index = page number in get_user_pages return
964                  * data_page_offset = offset with data_page_index page.
965                  * page_length = bytes to copy for this page
966                  */
967                 shmem_page_offset = offset & ~PAGE_MASK;
968                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
969                 data_page_offset = data_ptr & ~PAGE_MASK;
970
971                 page_length = remain;
972                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
973                         page_length = PAGE_SIZE - shmem_page_offset;
974                 if ((data_page_offset + page_length) > PAGE_SIZE)
975                         page_length = PAGE_SIZE - data_page_offset;
976
977                 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
978                                            GFP_HIGHUSER | __GFP_RECLAIMABLE);
979                 if (IS_ERR(page)) {
980                         ret = PTR_ERR(page);
981                         goto out;
982                 }
983
984                 if (do_bit17_swizzling) {
985                         slow_shmem_bit17_copy(page,
986                                               shmem_page_offset,
987                                               user_pages[data_page_index],
988                                               data_page_offset,
989                                               page_length,
990                                               0);
991                 } else {
992                         slow_shmem_copy(page,
993                                         shmem_page_offset,
994                                         user_pages[data_page_index],
995                                         data_page_offset,
996                                         page_length);
997                 }
998
999                 set_page_dirty(page);
1000                 mark_page_accessed(page);
1001                 page_cache_release(page);
1002
1003                 remain -= page_length;
1004                 data_ptr += page_length;
1005                 offset += page_length;
1006         }
1007
1008 out:
1009         for (i = 0; i < pinned_pages; i++)
1010                 page_cache_release(user_pages[i]);
1011         drm_free_large(user_pages);
1012
1013         return ret;
1014 }
1015
1016 /**
1017  * Writes data to the object referenced by handle.
1018  *
1019  * On error, the contents of the buffer that were to be modified are undefined.
1020  */
1021 int
1022 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1023                       struct drm_file *file)
1024 {
1025         struct drm_i915_gem_pwrite *args = data;
1026         struct drm_gem_object *obj;
1027         struct drm_i915_gem_object *obj_priv;
1028         int ret = 0;
1029
1030         ret = i915_mutex_lock_interruptible(dev);
1031         if (ret)
1032                 return ret;
1033
1034         obj = drm_gem_object_lookup(dev, file, args->handle);
1035         if (obj == NULL) {
1036                 ret = -ENOENT;
1037                 goto unlock;
1038         }
1039         obj_priv = to_intel_bo(obj);
1040
1041
1042         /* Bounds check destination. */
1043         if (args->offset > obj->size || args->size > obj->size - args->offset) {
1044                 ret = -EINVAL;
1045                 goto out;
1046         }
1047
1048         if (args->size == 0)
1049                 goto out;
1050
1051         if (!access_ok(VERIFY_READ,
1052                        (char __user *)(uintptr_t)args->data_ptr,
1053                        args->size)) {
1054                 ret = -EFAULT;
1055                 goto out;
1056         }
1057
1058         ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
1059                                       args->size);
1060         if (ret) {
1061                 ret = -EFAULT;
1062                 goto out;
1063         }
1064
1065         /* We can only do the GTT pwrite on untiled buffers, as otherwise
1066          * it would end up going through the fenced access, and we'll get
1067          * different detiling behavior between reading and writing.
1068          * pread/pwrite currently are reading and writing from the CPU
1069          * perspective, requiring manual detiling by the client.
1070          */
1071         if (obj_priv->phys_obj)
1072                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
1073         else if (obj_priv->tiling_mode == I915_TILING_NONE &&
1074                  obj_priv->gtt_space &&
1075                  obj->write_domain != I915_GEM_DOMAIN_CPU) {
1076                 ret = i915_gem_object_pin(obj, 0, true);
1077                 if (ret)
1078                         goto out;
1079
1080                 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
1081                 if (ret)
1082                         goto out_unpin;
1083
1084                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1085                 if (ret == -EFAULT)
1086                         ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
1087
1088 out_unpin:
1089                 i915_gem_object_unpin(obj);
1090         } else {
1091                 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1092                 if (ret)
1093                         goto out;
1094
1095                 ret = -EFAULT;
1096                 if (!i915_gem_object_needs_bit17_swizzle(obj))
1097                         ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
1098                 if (ret == -EFAULT)
1099                         ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
1100         }
1101
1102 out:
1103         drm_gem_object_unreference(obj);
1104 unlock:
1105         mutex_unlock(&dev->struct_mutex);
1106         return ret;
1107 }
1108
1109 /**
1110  * Called when user space prepares to use an object with the CPU, either
1111  * through the mmap ioctl's mapping or a GTT mapping.
1112  */
1113 int
1114 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1115                           struct drm_file *file_priv)
1116 {
1117         struct drm_i915_private *dev_priv = dev->dev_private;
1118         struct drm_i915_gem_set_domain *args = data;
1119         struct drm_gem_object *obj;
1120         struct drm_i915_gem_object *obj_priv;
1121         uint32_t read_domains = args->read_domains;
1122         uint32_t write_domain = args->write_domain;
1123         int ret;
1124
1125         if (!(dev->driver->driver_features & DRIVER_GEM))
1126                 return -ENODEV;
1127
1128         /* Only handle setting domains to types used by the CPU. */
1129         if (write_domain & I915_GEM_GPU_DOMAINS)
1130                 return -EINVAL;
1131
1132         if (read_domains & I915_GEM_GPU_DOMAINS)
1133                 return -EINVAL;
1134
1135         /* Having something in the write domain implies it's in the read
1136          * domain, and only that read domain.  Enforce that in the request.
1137          */
1138         if (write_domain != 0 && read_domains != write_domain)
1139                 return -EINVAL;
1140
1141         ret = i915_mutex_lock_interruptible(dev);
1142         if (ret)
1143                 return ret;
1144
1145         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1146         if (obj == NULL) {
1147                 ret = -ENOENT;
1148                 goto unlock;
1149         }
1150         obj_priv = to_intel_bo(obj);
1151
1152         intel_mark_busy(dev, obj);
1153
1154         if (read_domains & I915_GEM_DOMAIN_GTT) {
1155                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1156
1157                 /* Update the LRU on the fence for the CPU access that's
1158                  * about to occur.
1159                  */
1160                 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1161                         struct drm_i915_fence_reg *reg =
1162                                 &dev_priv->fence_regs[obj_priv->fence_reg];
1163                         list_move_tail(&reg->lru_list,
1164                                        &dev_priv->mm.fence_list);
1165                 }
1166
1167                 /* Silently promote "you're not bound, there was nothing to do"
1168                  * to success, since the client was just asking us to
1169                  * make sure everything was done.
1170                  */
1171                 if (ret == -EINVAL)
1172                         ret = 0;
1173         } else {
1174                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1175         }
1176
1177         /* Maintain LRU order of "inactive" objects */
1178         if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
1179                 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
1180
1181         drm_gem_object_unreference(obj);
1182 unlock:
1183         mutex_unlock(&dev->struct_mutex);
1184         return ret;
1185 }
1186
1187 /**
1188  * Called when user space has done writes to this buffer
1189  */
1190 int
1191 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1192                       struct drm_file *file_priv)
1193 {
1194         struct drm_i915_gem_sw_finish *args = data;
1195         struct drm_gem_object *obj;
1196         int ret = 0;
1197
1198         if (!(dev->driver->driver_features & DRIVER_GEM))
1199                 return -ENODEV;
1200
1201         ret = i915_mutex_lock_interruptible(dev);
1202         if (ret)
1203                 return ret;
1204
1205         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1206         if (obj == NULL) {
1207                 ret = -ENOENT;
1208                 goto unlock;
1209         }
1210
1211         /* Pinned buffers may be scanout, so flush the cache */
1212         if (to_intel_bo(obj)->pin_count)
1213                 i915_gem_object_flush_cpu_write_domain(obj);
1214
1215         drm_gem_object_unreference(obj);
1216 unlock:
1217         mutex_unlock(&dev->struct_mutex);
1218         return ret;
1219 }
1220
1221 /**
1222  * Maps the contents of an object, returning the address it is mapped
1223  * into.
1224  *
1225  * While the mapping holds a reference on the contents of the object, it doesn't
1226  * imply a ref on the object itself.
1227  */
1228 int
1229 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1230                    struct drm_file *file_priv)
1231 {
1232         struct drm_i915_private *dev_priv = dev->dev_private;
1233         struct drm_i915_gem_mmap *args = data;
1234         struct drm_gem_object *obj;
1235         loff_t offset;
1236         unsigned long addr;
1237
1238         if (!(dev->driver->driver_features & DRIVER_GEM))
1239                 return -ENODEV;
1240
1241         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1242         if (obj == NULL)
1243                 return -ENOENT;
1244
1245         if (obj->size > dev_priv->mm.gtt_mappable_end) {
1246                 drm_gem_object_unreference_unlocked(obj);
1247                 return -E2BIG;
1248         }
1249
1250         offset = args->offset;
1251
1252         down_write(&current->mm->mmap_sem);
1253         addr = do_mmap(obj->filp, 0, args->size,
1254                        PROT_READ | PROT_WRITE, MAP_SHARED,
1255                        args->offset);
1256         up_write(&current->mm->mmap_sem);
1257         drm_gem_object_unreference_unlocked(obj);
1258         if (IS_ERR((void *)addr))
1259                 return addr;
1260
1261         args->addr_ptr = (uint64_t) addr;
1262
1263         return 0;
1264 }
1265
1266 /**
1267  * i915_gem_fault - fault a page into the GTT
1268  * vma: VMA in question
1269  * vmf: fault info
1270  *
1271  * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1272  * from userspace.  The fault handler takes care of binding the object to
1273  * the GTT (if needed), allocating and programming a fence register (again,
1274  * only if needed based on whether the old reg is still valid or the object
1275  * is tiled) and inserting a new PTE into the faulting process.
1276  *
1277  * Note that the faulting process may involve evicting existing objects
1278  * from the GTT and/or fence registers to make room.  So performance may
1279  * suffer if the GTT working set is large or there are few fence registers
1280  * left.
1281  */
1282 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1283 {
1284         struct drm_gem_object *obj = vma->vm_private_data;
1285         struct drm_device *dev = obj->dev;
1286         drm_i915_private_t *dev_priv = dev->dev_private;
1287         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1288         pgoff_t page_offset;
1289         unsigned long pfn;
1290         int ret = 0;
1291         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1292
1293         /* We don't use vmf->pgoff since that has the fake offset */
1294         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1295                 PAGE_SHIFT;
1296
1297         /* Now bind it into the GTT if needed */
1298         mutex_lock(&dev->struct_mutex);
1299         BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable);
1300
1301         if (obj_priv->gtt_space) {
1302                 if (!obj_priv->map_and_fenceable) {
1303                         ret = i915_gem_object_unbind(obj);
1304                         if (ret)
1305                                 goto unlock;
1306                 }
1307         }
1308
1309         if (!obj_priv->gtt_space) {
1310                 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1311                 if (ret)
1312                         goto unlock;
1313         }
1314
1315         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1316         if (ret)
1317                 goto unlock;
1318
1319         if (!obj_priv->fault_mappable) {
1320                 obj_priv->fault_mappable = true;
1321                 i915_gem_info_update_mappable(dev_priv, obj_priv, true);
1322         }
1323
1324         /* Need a new fence register? */
1325         if (obj_priv->tiling_mode != I915_TILING_NONE) {
1326                 ret = i915_gem_object_get_fence_reg(obj, true);
1327                 if (ret)
1328                         goto unlock;
1329         }
1330
1331         if (i915_gem_object_is_inactive(obj_priv))
1332                 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
1333
1334         pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
1335                 page_offset;
1336
1337         /* Finally, remap it using the new GTT offset */
1338         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1339 unlock:
1340         mutex_unlock(&dev->struct_mutex);
1341
1342         switch (ret) {
1343         case -EAGAIN:
1344                 set_need_resched();
1345         case 0:
1346         case -ERESTARTSYS:
1347                 return VM_FAULT_NOPAGE;
1348         case -ENOMEM:
1349                 return VM_FAULT_OOM;
1350         default:
1351                 return VM_FAULT_SIGBUS;
1352         }
1353 }
1354
1355 /**
1356  * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1357  * @obj: obj in question
1358  *
1359  * GEM memory mapping works by handing back to userspace a fake mmap offset
1360  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
1361  * up the object based on the offset and sets up the various memory mapping
1362  * structures.
1363  *
1364  * This routine allocates and attaches a fake offset for @obj.
1365  */
1366 static int
1367 i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1368 {
1369         struct drm_device *dev = obj->dev;
1370         struct drm_gem_mm *mm = dev->mm_private;
1371         struct drm_map_list *list;
1372         struct drm_local_map *map;
1373         int ret = 0;
1374
1375         /* Set the object up for mmap'ing */
1376         list = &obj->map_list;
1377         list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
1378         if (!list->map)
1379                 return -ENOMEM;
1380
1381         map = list->map;
1382         map->type = _DRM_GEM;
1383         map->size = obj->size;
1384         map->handle = obj;
1385
1386         /* Get a DRM GEM mmap offset allocated... */
1387         list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1388                                                     obj->size / PAGE_SIZE, 0, 0);
1389         if (!list->file_offset_node) {
1390                 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
1391                 ret = -ENOSPC;
1392                 goto out_free_list;
1393         }
1394
1395         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1396                                                   obj->size / PAGE_SIZE, 0);
1397         if (!list->file_offset_node) {
1398                 ret = -ENOMEM;
1399                 goto out_free_list;
1400         }
1401
1402         list->hash.key = list->file_offset_node->start;
1403         ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
1404         if (ret) {
1405                 DRM_ERROR("failed to add to map hash\n");
1406                 goto out_free_mm;
1407         }
1408
1409         return 0;
1410
1411 out_free_mm:
1412         drm_mm_put_block(list->file_offset_node);
1413 out_free_list:
1414         kfree(list->map);
1415         list->map = NULL;
1416
1417         return ret;
1418 }
1419
1420 /**
1421  * i915_gem_release_mmap - remove physical page mappings
1422  * @obj: obj in question
1423  *
1424  * Preserve the reservation of the mmapping with the DRM core code, but
1425  * relinquish ownership of the pages back to the system.
1426  *
1427  * It is vital that we remove the page mapping if we have mapped a tiled
1428  * object through the GTT and then lose the fence register due to
1429  * resource pressure. Similarly if the object has been moved out of the
1430  * aperture, than pages mapped into userspace must be revoked. Removing the
1431  * mapping will then trigger a page fault on the next user access, allowing
1432  * fixup by i915_gem_fault().
1433  */
1434 void
1435 i915_gem_release_mmap(struct drm_gem_object *obj)
1436 {
1437         struct drm_device *dev = obj->dev;
1438         struct drm_i915_private *dev_priv = dev->dev_private;
1439         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1440
1441         if (unlikely(obj->map_list.map && dev->dev_mapping))
1442                 unmap_mapping_range(dev->dev_mapping,
1443                                     (loff_t)obj->map_list.hash.key<<PAGE_SHIFT,
1444                                     obj->size, 1);
1445
1446         if (obj_priv->fault_mappable) {
1447                 obj_priv->fault_mappable = false;
1448                 i915_gem_info_update_mappable(dev_priv, obj_priv, false);
1449         }
1450 }
1451
1452 static void
1453 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1454 {
1455         struct drm_device *dev = obj->dev;
1456         struct drm_gem_mm *mm = dev->mm_private;
1457         struct drm_map_list *list = &obj->map_list;
1458
1459         drm_ht_remove_item(&mm->offset_hash, &list->hash);
1460         drm_mm_put_block(list->file_offset_node);
1461         kfree(list->map);
1462         list->map = NULL;
1463 }
1464
1465 /**
1466  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1467  * @obj: object to check
1468  *
1469  * Return the required GTT alignment for an object, taking into account
1470  * potential fence register mapping.
1471  */
1472 static uint32_t
1473 i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv)
1474 {
1475         struct drm_device *dev = obj_priv->base.dev;
1476
1477         /*
1478          * Minimum alignment is 4k (GTT page size), but might be greater
1479          * if a fence register is needed for the object.
1480          */
1481         if (INTEL_INFO(dev)->gen >= 4 ||
1482             obj_priv->tiling_mode == I915_TILING_NONE)
1483                 return 4096;
1484
1485         /*
1486          * Previous chips need to be aligned to the size of the smallest
1487          * fence register that can contain the object.
1488          */
1489         return i915_gem_get_gtt_size(obj_priv);
1490 }
1491
1492 /**
1493  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1494  *                                       unfenced object
1495  * @obj: object to check
1496  *
1497  * Return the required GTT alignment for an object, only taking into account
1498  * unfenced tiled surface requirements.
1499  */
1500 static uint32_t
1501 i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj_priv)
1502 {
1503         struct drm_device *dev = obj_priv->base.dev;
1504         int tile_height;
1505
1506         /*
1507          * Minimum alignment is 4k (GTT page size) for sane hw.
1508          */
1509         if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1510             obj_priv->tiling_mode == I915_TILING_NONE)
1511                 return 4096;
1512
1513         /*
1514          * Older chips need unfenced tiled buffers to be aligned to the left
1515          * edge of an even tile row (where tile rows are counted as if the bo is
1516          * placed in a fenced gtt region).
1517          */
1518         if (IS_GEN2(dev) ||
1519             (obj_priv->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
1520                 tile_height = 32;
1521         else
1522                 tile_height = 8;
1523
1524         return tile_height * obj_priv->stride * 2;
1525 }
1526
1527 static uint32_t
1528 i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv)
1529 {
1530         struct drm_device *dev = obj_priv->base.dev;
1531         uint32_t size;
1532
1533         /*
1534          * Minimum alignment is 4k (GTT page size), but might be greater
1535          * if a fence register is needed for the object.
1536          */
1537         if (INTEL_INFO(dev)->gen >= 4)
1538                 return obj_priv->base.size;
1539
1540         /*
1541          * Previous chips need to be aligned to the size of the smallest
1542          * fence register that can contain the object.
1543          */
1544         if (INTEL_INFO(dev)->gen == 3)
1545                 size = 1024*1024;
1546         else
1547                 size = 512*1024;
1548
1549         while (size < obj_priv->base.size)
1550                 size <<= 1;
1551
1552         return size;
1553 }
1554
1555 /**
1556  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1557  * @dev: DRM device
1558  * @data: GTT mapping ioctl data
1559  * @file_priv: GEM object info
1560  *
1561  * Simply returns the fake offset to userspace so it can mmap it.
1562  * The mmap call will end up in drm_gem_mmap(), which will set things
1563  * up so we can get faults in the handler above.
1564  *
1565  * The fault handler will take care of binding the object into the GTT
1566  * (since it may have been evicted to make room for something), allocating
1567  * a fence register, and mapping the appropriate aperture address into
1568  * userspace.
1569  */
1570 int
1571 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1572                         struct drm_file *file_priv)
1573 {
1574         struct drm_i915_private *dev_priv = dev->dev_private;
1575         struct drm_i915_gem_mmap_gtt *args = data;
1576         struct drm_gem_object *obj;
1577         struct drm_i915_gem_object *obj_priv;
1578         int ret;
1579
1580         if (!(dev->driver->driver_features & DRIVER_GEM))
1581                 return -ENODEV;
1582
1583         ret = i915_mutex_lock_interruptible(dev);
1584         if (ret)
1585                 return ret;
1586
1587         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1588         if (obj == NULL) {
1589                 ret = -ENOENT;
1590                 goto unlock;
1591         }
1592         obj_priv = to_intel_bo(obj);
1593
1594         if (obj->size > dev_priv->mm.gtt_mappable_end) {
1595                 ret = -E2BIG;
1596                 goto unlock;
1597         }
1598
1599         if (obj_priv->madv != I915_MADV_WILLNEED) {
1600                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1601                 ret = -EINVAL;
1602                 goto out;
1603         }
1604
1605         if (!obj->map_list.map) {
1606                 ret = i915_gem_create_mmap_offset(obj);
1607                 if (ret)
1608                         goto out;
1609         }
1610
1611         args->offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
1612
1613 out:
1614         drm_gem_object_unreference(obj);
1615 unlock:
1616         mutex_unlock(&dev->struct_mutex);
1617         return ret;
1618 }
1619
1620 static int
1621 i915_gem_object_get_pages_gtt(struct drm_gem_object *obj,
1622                               gfp_t gfpmask)
1623 {
1624         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1625         int page_count, i;
1626         struct address_space *mapping;
1627         struct inode *inode;
1628         struct page *page;
1629
1630         /* Get the list of pages out of our struct file.  They'll be pinned
1631          * at this point until we release them.
1632          */
1633         page_count = obj->size / PAGE_SIZE;
1634         BUG_ON(obj_priv->pages != NULL);
1635         obj_priv->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1636         if (obj_priv->pages == NULL)
1637                 return -ENOMEM;
1638
1639         inode = obj->filp->f_path.dentry->d_inode;
1640         mapping = inode->i_mapping;
1641         for (i = 0; i < page_count; i++) {
1642                 page = read_cache_page_gfp(mapping, i,
1643                                            GFP_HIGHUSER |
1644                                            __GFP_COLD |
1645                                            __GFP_RECLAIMABLE |
1646                                            gfpmask);
1647                 if (IS_ERR(page))
1648                         goto err_pages;
1649
1650                 obj_priv->pages[i] = page;
1651         }
1652
1653         if (obj_priv->tiling_mode != I915_TILING_NONE)
1654                 i915_gem_object_do_bit_17_swizzle(obj);
1655
1656         return 0;
1657
1658 err_pages:
1659         while (i--)
1660                 page_cache_release(obj_priv->pages[i]);
1661
1662         drm_free_large(obj_priv->pages);
1663         obj_priv->pages = NULL;
1664         return PTR_ERR(page);
1665 }
1666
1667 static void
1668 i915_gem_object_put_pages_gtt(struct drm_gem_object *obj)
1669 {
1670         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1671         int page_count = obj->size / PAGE_SIZE;
1672         int i;
1673
1674         BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
1675
1676         if (obj_priv->tiling_mode != I915_TILING_NONE)
1677                 i915_gem_object_save_bit_17_swizzle(obj);
1678
1679         if (obj_priv->madv == I915_MADV_DONTNEED)
1680                 obj_priv->dirty = 0;
1681
1682         for (i = 0; i < page_count; i++) {
1683                 if (obj_priv->dirty)
1684                         set_page_dirty(obj_priv->pages[i]);
1685
1686                 if (obj_priv->madv == I915_MADV_WILLNEED)
1687                         mark_page_accessed(obj_priv->pages[i]);
1688
1689                 page_cache_release(obj_priv->pages[i]);
1690         }
1691         obj_priv->dirty = 0;
1692
1693         drm_free_large(obj_priv->pages);
1694         obj_priv->pages = NULL;
1695 }
1696
1697 static uint32_t
1698 i915_gem_next_request_seqno(struct drm_device *dev,
1699                             struct intel_ring_buffer *ring)
1700 {
1701         drm_i915_private_t *dev_priv = dev->dev_private;
1702         return ring->outstanding_lazy_request = dev_priv->next_seqno;
1703 }
1704
1705 static void
1706 i915_gem_object_move_to_active(struct drm_gem_object *obj,
1707                                struct intel_ring_buffer *ring)
1708 {
1709         struct drm_device *dev = obj->dev;
1710         struct drm_i915_private *dev_priv = dev->dev_private;
1711         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1712         uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
1713
1714         BUG_ON(ring == NULL);
1715         obj_priv->ring = ring;
1716
1717         /* Add a reference if we're newly entering the active list. */
1718         if (!obj_priv->active) {
1719                 drm_gem_object_reference(obj);
1720                 obj_priv->active = 1;
1721         }
1722
1723         /* Move from whatever list we were on to the tail of execution. */
1724         list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
1725         list_move_tail(&obj_priv->ring_list, &ring->active_list);
1726         obj_priv->last_rendering_seqno = seqno;
1727 }
1728
1729 static void
1730 i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1731 {
1732         struct drm_device *dev = obj->dev;
1733         drm_i915_private_t *dev_priv = dev->dev_private;
1734         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1735
1736         BUG_ON(!obj_priv->active);
1737         list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
1738         list_del_init(&obj_priv->ring_list);
1739         obj_priv->last_rendering_seqno = 0;
1740 }
1741
1742 /* Immediately discard the backing storage */
1743 static void
1744 i915_gem_object_truncate(struct drm_gem_object *obj)
1745 {
1746         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1747         struct inode *inode;
1748
1749         /* Our goal here is to return as much of the memory as
1750          * is possible back to the system as we are called from OOM.
1751          * To do this we must instruct the shmfs to drop all of its
1752          * backing pages, *now*. Here we mirror the actions taken
1753          * when by shmem_delete_inode() to release the backing store.
1754          */
1755         inode = obj->filp->f_path.dentry->d_inode;
1756         truncate_inode_pages(inode->i_mapping, 0);
1757         if (inode->i_op->truncate_range)
1758                 inode->i_op->truncate_range(inode, 0, (loff_t)-1);
1759
1760         obj_priv->madv = __I915_MADV_PURGED;
1761 }
1762
1763 static inline int
1764 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
1765 {
1766         return obj_priv->madv == I915_MADV_DONTNEED;
1767 }
1768
1769 static void
1770 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1771 {
1772         struct drm_device *dev = obj->dev;
1773         drm_i915_private_t *dev_priv = dev->dev_private;
1774         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1775
1776         if (obj_priv->pin_count != 0)
1777                 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
1778         else
1779                 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
1780         list_del_init(&obj_priv->ring_list);
1781
1782         BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1783
1784         obj_priv->last_rendering_seqno = 0;
1785         obj_priv->ring = NULL;
1786         if (obj_priv->active) {
1787                 obj_priv->active = 0;
1788                 drm_gem_object_unreference(obj);
1789         }
1790         WARN_ON(i915_verify_lists(dev));
1791 }
1792
1793 static void
1794 i915_gem_process_flushing_list(struct drm_device *dev,
1795                                uint32_t flush_domains,
1796                                struct intel_ring_buffer *ring)
1797 {
1798         drm_i915_private_t *dev_priv = dev->dev_private;
1799         struct drm_i915_gem_object *obj_priv, *next;
1800
1801         list_for_each_entry_safe(obj_priv, next,
1802                                  &ring->gpu_write_list,
1803                                  gpu_write_list) {
1804                 struct drm_gem_object *obj = &obj_priv->base;
1805
1806                 if (obj->write_domain & flush_domains) {
1807                         uint32_t old_write_domain = obj->write_domain;
1808
1809                         obj->write_domain = 0;
1810                         list_del_init(&obj_priv->gpu_write_list);
1811                         i915_gem_object_move_to_active(obj, ring);
1812
1813                         /* update the fence lru list */
1814                         if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1815                                 struct drm_i915_fence_reg *reg =
1816                                         &dev_priv->fence_regs[obj_priv->fence_reg];
1817                                 list_move_tail(&reg->lru_list,
1818                                                 &dev_priv->mm.fence_list);
1819                         }
1820
1821                         trace_i915_gem_object_change_domain(obj,
1822                                                             obj->read_domains,
1823                                                             old_write_domain);
1824                 }
1825         }
1826 }
1827
1828 int
1829 i915_add_request(struct drm_device *dev,
1830                  struct drm_file *file,
1831                  struct drm_i915_gem_request *request,
1832                  struct intel_ring_buffer *ring)
1833 {
1834         drm_i915_private_t *dev_priv = dev->dev_private;
1835         struct drm_i915_file_private *file_priv = NULL;
1836         uint32_t seqno;
1837         int was_empty;
1838         int ret;
1839
1840         BUG_ON(request == NULL);
1841
1842         if (file != NULL)
1843                 file_priv = file->driver_priv;
1844
1845         ret = ring->add_request(ring, &seqno);
1846         if (ret)
1847             return ret;
1848
1849         ring->outstanding_lazy_request = false;
1850
1851         request->seqno = seqno;
1852         request->ring = ring;
1853         request->emitted_jiffies = jiffies;
1854         was_empty = list_empty(&ring->request_list);
1855         list_add_tail(&request->list, &ring->request_list);
1856
1857         if (file_priv) {
1858                 spin_lock(&file_priv->mm.lock);
1859                 request->file_priv = file_priv;
1860                 list_add_tail(&request->client_list,
1861                               &file_priv->mm.request_list);
1862                 spin_unlock(&file_priv->mm.lock);
1863         }
1864
1865         if (!dev_priv->mm.suspended) {
1866                 mod_timer(&dev_priv->hangcheck_timer,
1867                           jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1868                 if (was_empty)
1869                         queue_delayed_work(dev_priv->wq,
1870                                            &dev_priv->mm.retire_work, HZ);
1871         }
1872         return 0;
1873 }
1874
1875 /**
1876  * Command execution barrier
1877  *
1878  * Ensures that all commands in the ring are finished
1879  * before signalling the CPU
1880  */
1881 static void
1882 i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
1883 {
1884         uint32_t flush_domains = 0;
1885
1886         /* The sampler always gets flushed on i965 (sigh) */
1887         if (INTEL_INFO(dev)->gen >= 4)
1888                 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1889
1890         ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains);
1891 }
1892
1893 static inline void
1894 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1895 {
1896         struct drm_i915_file_private *file_priv = request->file_priv;
1897
1898         if (!file_priv)
1899                 return;
1900
1901         spin_lock(&file_priv->mm.lock);
1902         list_del(&request->client_list);
1903         request->file_priv = NULL;
1904         spin_unlock(&file_priv->mm.lock);
1905 }
1906
1907 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1908                                       struct intel_ring_buffer *ring)
1909 {
1910         while (!list_empty(&ring->request_list)) {
1911                 struct drm_i915_gem_request *request;
1912
1913                 request = list_first_entry(&ring->request_list,
1914                                            struct drm_i915_gem_request,
1915                                            list);
1916
1917                 list_del(&request->list);
1918                 i915_gem_request_remove_from_client(request);
1919                 kfree(request);
1920         }
1921
1922         while (!list_empty(&ring->active_list)) {
1923                 struct drm_i915_gem_object *obj_priv;
1924
1925                 obj_priv = list_first_entry(&ring->active_list,
1926                                             struct drm_i915_gem_object,
1927                                             ring_list);
1928
1929                 obj_priv->base.write_domain = 0;
1930                 list_del_init(&obj_priv->gpu_write_list);
1931                 i915_gem_object_move_to_inactive(&obj_priv->base);
1932         }
1933 }
1934
1935 void i915_gem_reset(struct drm_device *dev)
1936 {
1937         struct drm_i915_private *dev_priv = dev->dev_private;
1938         struct drm_i915_gem_object *obj_priv;
1939         int i;
1940
1941         i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
1942         i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
1943         i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
1944
1945         /* Remove anything from the flushing lists. The GPU cache is likely
1946          * to be lost on reset along with the data, so simply move the
1947          * lost bo to the inactive list.
1948          */
1949         while (!list_empty(&dev_priv->mm.flushing_list)) {
1950                 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1951                                             struct drm_i915_gem_object,
1952                                             mm_list);
1953
1954                 obj_priv->base.write_domain = 0;
1955                 list_del_init(&obj_priv->gpu_write_list);
1956                 i915_gem_object_move_to_inactive(&obj_priv->base);
1957         }
1958
1959         /* Move everything out of the GPU domains to ensure we do any
1960          * necessary invalidation upon reuse.
1961          */
1962         list_for_each_entry(obj_priv,
1963                             &dev_priv->mm.inactive_list,
1964                             mm_list)
1965         {
1966                 obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1967         }
1968
1969         /* The fence registers are invalidated so clear them out */
1970         for (i = 0; i < 16; i++) {
1971                 struct drm_i915_fence_reg *reg;
1972
1973                 reg = &dev_priv->fence_regs[i];
1974                 if (!reg->obj)
1975                         continue;
1976
1977                 i915_gem_clear_fence_reg(reg->obj);
1978         }
1979 }
1980
1981 /**
1982  * This function clears the request list as sequence numbers are passed.
1983  */
1984 static void
1985 i915_gem_retire_requests_ring(struct drm_device *dev,
1986                               struct intel_ring_buffer *ring)
1987 {
1988         drm_i915_private_t *dev_priv = dev->dev_private;
1989         uint32_t seqno;
1990
1991         if (!ring->status_page.page_addr ||
1992             list_empty(&ring->request_list))
1993                 return;
1994
1995         WARN_ON(i915_verify_lists(dev));
1996
1997         seqno = ring->get_seqno(ring);
1998         while (!list_empty(&ring->request_list)) {
1999                 struct drm_i915_gem_request *request;
2000
2001                 request = list_first_entry(&ring->request_list,
2002                                            struct drm_i915_gem_request,
2003                                            list);
2004
2005                 if (!i915_seqno_passed(seqno, request->seqno))
2006                         break;
2007
2008                 trace_i915_gem_request_retire(dev, request->seqno);
2009
2010                 list_del(&request->list);
2011                 i915_gem_request_remove_from_client(request);
2012                 kfree(request);
2013         }
2014
2015         /* Move any buffers on the active list that are no longer referenced
2016          * by the ringbuffer to the flushing/inactive lists as appropriate.
2017          */
2018         while (!list_empty(&ring->active_list)) {
2019                 struct drm_gem_object *obj;
2020                 struct drm_i915_gem_object *obj_priv;
2021
2022                 obj_priv = list_first_entry(&ring->active_list,
2023                                             struct drm_i915_gem_object,
2024                                             ring_list);
2025
2026                 if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
2027                         break;
2028
2029                 obj = &obj_priv->base;
2030                 if (obj->write_domain != 0)
2031                         i915_gem_object_move_to_flushing(obj);
2032                 else
2033                         i915_gem_object_move_to_inactive(obj);
2034         }
2035
2036         if (unlikely (dev_priv->trace_irq_seqno &&
2037                       i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
2038                 ring->user_irq_put(ring);
2039                 dev_priv->trace_irq_seqno = 0;
2040         }
2041
2042         WARN_ON(i915_verify_lists(dev));
2043 }
2044
2045 void
2046 i915_gem_retire_requests(struct drm_device *dev)
2047 {
2048         drm_i915_private_t *dev_priv = dev->dev_private;
2049
2050         if (!list_empty(&dev_priv->mm.deferred_free_list)) {
2051             struct drm_i915_gem_object *obj_priv, *tmp;
2052
2053             /* We must be careful that during unbind() we do not
2054              * accidentally infinitely recurse into retire requests.
2055              * Currently:
2056              *   retire -> free -> unbind -> wait -> retire_ring
2057              */
2058             list_for_each_entry_safe(obj_priv, tmp,
2059                                      &dev_priv->mm.deferred_free_list,
2060                                      mm_list)
2061                     i915_gem_free_object_tail(&obj_priv->base);
2062         }
2063
2064         i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
2065         i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
2066         i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
2067 }
2068
2069 static void
2070 i915_gem_retire_work_handler(struct work_struct *work)
2071 {
2072         drm_i915_private_t *dev_priv;
2073         struct drm_device *dev;
2074
2075         dev_priv = container_of(work, drm_i915_private_t,
2076                                 mm.retire_work.work);
2077         dev = dev_priv->dev;
2078
2079         /* Come back later if the device is busy... */
2080         if (!mutex_trylock(&dev->struct_mutex)) {
2081                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
2082                 return;
2083         }
2084
2085         i915_gem_retire_requests(dev);
2086
2087         if (!dev_priv->mm.suspended &&
2088                 (!list_empty(&dev_priv->render_ring.request_list) ||
2089                  !list_empty(&dev_priv->bsd_ring.request_list) ||
2090                  !list_empty(&dev_priv->blt_ring.request_list)))
2091                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
2092         mutex_unlock(&dev->struct_mutex);
2093 }
2094
2095 int
2096 i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
2097                      bool interruptible, struct intel_ring_buffer *ring)
2098 {
2099         drm_i915_private_t *dev_priv = dev->dev_private;
2100         u32 ier;
2101         int ret = 0;
2102
2103         BUG_ON(seqno == 0);
2104
2105         if (atomic_read(&dev_priv->mm.wedged))
2106                 return -EAGAIN;
2107
2108         if (seqno == ring->outstanding_lazy_request) {
2109                 struct drm_i915_gem_request *request;
2110
2111                 request = kzalloc(sizeof(*request), GFP_KERNEL);
2112                 if (request == NULL)
2113                         return -ENOMEM;
2114
2115                 ret = i915_add_request(dev, NULL, request, ring);
2116                 if (ret) {
2117                         kfree(request);
2118                         return ret;
2119                 }
2120
2121                 seqno = request->seqno;
2122         }
2123
2124         if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
2125                 if (HAS_PCH_SPLIT(dev))
2126                         ier = I915_READ(DEIER) | I915_READ(GTIER);
2127                 else
2128                         ier = I915_READ(IER);
2129                 if (!ier) {
2130                         DRM_ERROR("something (likely vbetool) disabled "
2131                                   "interrupts, re-enabling\n");
2132                         i915_driver_irq_preinstall(dev);
2133                         i915_driver_irq_postinstall(dev);
2134                 }
2135
2136                 trace_i915_gem_request_wait_begin(dev, seqno);
2137
2138                 ring->waiting_seqno = seqno;
2139                 ring->user_irq_get(ring);
2140                 if (interruptible)
2141                         ret = wait_event_interruptible(ring->irq_queue,
2142                                 i915_seqno_passed(ring->get_seqno(ring), seqno)
2143                                 || atomic_read(&dev_priv->mm.wedged));
2144                 else
2145                         wait_event(ring->irq_queue,
2146                                 i915_seqno_passed(ring->get_seqno(ring), seqno)
2147                                 || atomic_read(&dev_priv->mm.wedged));
2148
2149                 ring->user_irq_put(ring);
2150                 ring->waiting_seqno = 0;
2151
2152                 trace_i915_gem_request_wait_end(dev, seqno);
2153         }
2154         if (atomic_read(&dev_priv->mm.wedged))
2155                 ret = -EAGAIN;
2156
2157         if (ret && ret != -ERESTARTSYS)
2158                 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
2159                           __func__, ret, seqno, ring->get_seqno(ring),
2160                           dev_priv->next_seqno);
2161
2162         /* Directly dispatch request retiring.  While we have the work queue
2163          * to handle this, the waiter on a request often wants an associated
2164          * buffer to have made it to the inactive list, and we would need
2165          * a separate wait queue to handle that.
2166          */
2167         if (ret == 0)
2168                 i915_gem_retire_requests_ring(dev, ring);
2169
2170         return ret;
2171 }
2172
2173 /**
2174  * Waits for a sequence number to be signaled, and cleans up the
2175  * request and object lists appropriately for that event.
2176  */
2177 static int
2178 i915_wait_request(struct drm_device *dev, uint32_t seqno,
2179                   struct intel_ring_buffer *ring)
2180 {
2181         return i915_do_wait_request(dev, seqno, 1, ring);
2182 }
2183
2184 static void
2185 i915_gem_flush_ring(struct drm_device *dev,
2186                     struct drm_file *file_priv,
2187                     struct intel_ring_buffer *ring,
2188                     uint32_t invalidate_domains,
2189                     uint32_t flush_domains)
2190 {
2191         ring->flush(ring, invalidate_domains, flush_domains);
2192         i915_gem_process_flushing_list(dev, flush_domains, ring);
2193 }
2194
2195 static void
2196 i915_gem_flush(struct drm_device *dev,
2197                struct drm_file *file_priv,
2198                uint32_t invalidate_domains,
2199                uint32_t flush_domains,
2200                uint32_t flush_rings)
2201 {
2202         drm_i915_private_t *dev_priv = dev->dev_private;
2203
2204         if (flush_domains & I915_GEM_DOMAIN_CPU)
2205                 drm_agp_chipset_flush(dev);
2206
2207         if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
2208                 if (flush_rings & RING_RENDER)
2209                         i915_gem_flush_ring(dev, file_priv,
2210                                             &dev_priv->render_ring,
2211                                             invalidate_domains, flush_domains);
2212                 if (flush_rings & RING_BSD)
2213                         i915_gem_flush_ring(dev, file_priv,
2214                                             &dev_priv->bsd_ring,
2215                                             invalidate_domains, flush_domains);
2216                 if (flush_rings & RING_BLT)
2217                         i915_gem_flush_ring(dev, file_priv,
2218                                             &dev_priv->blt_ring,
2219                                             invalidate_domains, flush_domains);
2220         }
2221 }
2222
2223 /**
2224  * Ensures that all rendering to the object has completed and the object is
2225  * safe to unbind from the GTT or access from the CPU.
2226  */
2227 static int
2228 i915_gem_object_wait_rendering(struct drm_gem_object *obj,
2229                                bool interruptible)
2230 {
2231         struct drm_device *dev = obj->dev;
2232         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2233         int ret;
2234
2235         /* This function only exists to support waiting for existing rendering,
2236          * not for emitting required flushes.
2237          */
2238         BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
2239
2240         /* If there is rendering queued on the buffer being evicted, wait for
2241          * it.
2242          */
2243         if (obj_priv->active) {
2244                 ret = i915_do_wait_request(dev,
2245                                            obj_priv->last_rendering_seqno,
2246                                            interruptible,
2247                                            obj_priv->ring);
2248                 if (ret)
2249                         return ret;
2250         }
2251
2252         return 0;
2253 }
2254
2255 /**
2256  * Unbinds an object from the GTT aperture.
2257  */
2258 int
2259 i915_gem_object_unbind(struct drm_gem_object *obj)
2260 {
2261         struct drm_device *dev = obj->dev;
2262         struct drm_i915_private *dev_priv = dev->dev_private;
2263         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2264         int ret = 0;
2265
2266         if (obj_priv->gtt_space == NULL)
2267                 return 0;
2268
2269         if (obj_priv->pin_count != 0) {
2270                 DRM_ERROR("Attempting to unbind pinned buffer\n");
2271                 return -EINVAL;
2272         }
2273
2274         /* blow away mappings if mapped through GTT */
2275         i915_gem_release_mmap(obj);
2276
2277         /* Move the object to the CPU domain to ensure that
2278          * any possible CPU writes while it's not in the GTT
2279          * are flushed when we go to remap it. This will
2280          * also ensure that all pending GPU writes are finished
2281          * before we unbind.
2282          */
2283         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2284         if (ret == -ERESTARTSYS)
2285                 return ret;
2286         /* Continue on if we fail due to EIO, the GPU is hung so we
2287          * should be safe and we need to cleanup or else we might
2288          * cause memory corruption through use-after-free.
2289          */
2290         if (ret) {
2291                 i915_gem_clflush_object(obj);
2292                 obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
2293         }
2294
2295         /* release the fence reg _after_ flushing */
2296         if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2297                 i915_gem_clear_fence_reg(obj);
2298
2299         drm_unbind_agp(obj_priv->agp_mem);
2300         drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
2301
2302         i915_gem_object_put_pages_gtt(obj);
2303
2304         i915_gem_info_remove_gtt(dev_priv, obj_priv);
2305         list_del_init(&obj_priv->mm_list);
2306         /* Avoid an unnecessary call to unbind on rebind. */
2307         obj_priv->map_and_fenceable = true;
2308
2309         drm_mm_put_block(obj_priv->gtt_space);
2310         obj_priv->gtt_space = NULL;
2311         obj_priv->gtt_offset = 0;
2312
2313         if (i915_gem_object_is_purgeable(obj_priv))
2314                 i915_gem_object_truncate(obj);
2315
2316         trace_i915_gem_object_unbind(obj);
2317
2318         return ret;
2319 }
2320
2321 static int i915_ring_idle(struct drm_device *dev,
2322                           struct intel_ring_buffer *ring)
2323 {
2324         if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2325                 return 0;
2326
2327         i915_gem_flush_ring(dev, NULL, ring,
2328                             I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2329         return i915_wait_request(dev,
2330                                  i915_gem_next_request_seqno(dev, ring),
2331                                  ring);
2332 }
2333
2334 int
2335 i915_gpu_idle(struct drm_device *dev)
2336 {
2337         drm_i915_private_t *dev_priv = dev->dev_private;
2338         bool lists_empty;
2339         int ret;
2340
2341         lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2342                        list_empty(&dev_priv->mm.active_list));
2343         if (lists_empty)
2344                 return 0;
2345
2346         /* Flush everything onto the inactive list. */
2347         ret = i915_ring_idle(dev, &dev_priv->render_ring);
2348         if (ret)
2349                 return ret;
2350
2351         ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
2352         if (ret)
2353                 return ret;
2354
2355         ret = i915_ring_idle(dev, &dev_priv->blt_ring);
2356         if (ret)
2357                 return ret;
2358
2359         return 0;
2360 }
2361
2362 static void sandybridge_write_fence_reg(struct drm_gem_object *obj)
2363 {
2364         struct drm_device *dev = obj->dev;
2365         drm_i915_private_t *dev_priv = dev->dev_private;
2366         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2367         u32 size = i915_gem_get_gtt_size(obj_priv);
2368         int regnum = obj_priv->fence_reg;
2369         uint64_t val;
2370
2371         val = (uint64_t)((obj_priv->gtt_offset + size - 4096) &
2372                     0xfffff000) << 32;
2373         val |= obj_priv->gtt_offset & 0xfffff000;
2374         val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
2375                 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2376
2377         if (obj_priv->tiling_mode == I915_TILING_Y)
2378                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2379         val |= I965_FENCE_REG_VALID;
2380
2381         I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
2382 }
2383
2384 static void i965_write_fence_reg(struct drm_gem_object *obj)
2385 {
2386         struct drm_device *dev = obj->dev;
2387         drm_i915_private_t *dev_priv = dev->dev_private;
2388         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2389         u32 size = i915_gem_get_gtt_size(obj_priv);
2390         int regnum = obj_priv->fence_reg;
2391         uint64_t val;
2392
2393         val = (uint64_t)((obj_priv->gtt_offset + size - 4096) &
2394                     0xfffff000) << 32;
2395         val |= obj_priv->gtt_offset & 0xfffff000;
2396         val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2397         if (obj_priv->tiling_mode == I915_TILING_Y)
2398                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2399         val |= I965_FENCE_REG_VALID;
2400
2401         I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2402 }
2403
2404 static void i915_write_fence_reg(struct drm_gem_object *obj)
2405 {
2406         struct drm_device *dev = obj->dev;
2407         drm_i915_private_t *dev_priv = dev->dev_private;
2408         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2409         u32 size = i915_gem_get_gtt_size(obj_priv);
2410         uint32_t fence_reg, val, pitch_val;
2411         int tile_width;
2412
2413         if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2414             (obj_priv->gtt_offset & (size - 1))) {
2415                 WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n",
2416                      __func__, obj_priv->gtt_offset, obj_priv->map_and_fenceable, size,
2417                      obj_priv->gtt_space->start, obj_priv->gtt_space->size);
2418                 return;
2419         }
2420
2421         if (obj_priv->tiling_mode == I915_TILING_Y &&
2422             HAS_128_BYTE_Y_TILING(dev))
2423                 tile_width = 128;
2424         else
2425                 tile_width = 512;
2426
2427         /* Note: pitch better be a power of two tile widths */
2428         pitch_val = obj_priv->stride / tile_width;
2429         pitch_val = ffs(pitch_val) - 1;
2430
2431         if (obj_priv->tiling_mode == I915_TILING_Y &&
2432             HAS_128_BYTE_Y_TILING(dev))
2433                 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2434         else
2435                 WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
2436
2437         val = obj_priv->gtt_offset;
2438         if (obj_priv->tiling_mode == I915_TILING_Y)
2439                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2440         val |= I915_FENCE_SIZE_BITS(size);
2441         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2442         val |= I830_FENCE_REG_VALID;
2443
2444         fence_reg = obj_priv->fence_reg;
2445         if (fence_reg < 8)
2446                 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2447         else
2448                 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2449         I915_WRITE(fence_reg, val);
2450 }
2451
2452 static void i830_write_fence_reg(struct drm_gem_object *obj)
2453 {
2454         struct drm_device *dev = obj->dev;
2455         drm_i915_private_t *dev_priv = dev->dev_private;
2456         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2457         u32 size = i915_gem_get_gtt_size(obj_priv);
2458         int regnum = obj_priv->fence_reg;
2459         uint32_t val;
2460         uint32_t pitch_val;
2461         uint32_t fence_size_bits;
2462
2463         if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
2464             (obj_priv->gtt_offset & (obj->size - 1))) {
2465                 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
2466                      __func__, obj_priv->gtt_offset);
2467                 return;
2468         }
2469
2470         pitch_val = obj_priv->stride / 128;
2471         pitch_val = ffs(pitch_val) - 1;
2472         WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2473
2474         val = obj_priv->gtt_offset;
2475         if (obj_priv->tiling_mode == I915_TILING_Y)
2476                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2477         fence_size_bits = I830_FENCE_SIZE_BITS(size);
2478         WARN_ON(fence_size_bits & ~0x00000f00);
2479         val |= fence_size_bits;
2480         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2481         val |= I830_FENCE_REG_VALID;
2482
2483         I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
2484 }
2485
2486 static int i915_find_fence_reg(struct drm_device *dev,
2487                                bool interruptible)
2488 {
2489         struct drm_i915_private *dev_priv = dev->dev_private;
2490         struct drm_i915_fence_reg *reg;
2491         struct drm_i915_gem_object *obj_priv = NULL;
2492         int i, avail, ret;
2493
2494         /* First try to find a free reg */
2495         avail = 0;
2496         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2497                 reg = &dev_priv->fence_regs[i];
2498                 if (!reg->obj)
2499                         return i;
2500
2501                 obj_priv = to_intel_bo(reg->obj);
2502                 if (!obj_priv->pin_count)
2503                     avail++;
2504         }
2505
2506         if (avail == 0)
2507                 return -ENOSPC;
2508
2509         /* None available, try to steal one or wait for a user to finish */
2510         avail = I915_FENCE_REG_NONE;
2511         list_for_each_entry(reg, &dev_priv->mm.fence_list,
2512                             lru_list) {
2513                 obj_priv = to_intel_bo(reg->obj);
2514                 if (obj_priv->pin_count)
2515                         continue;
2516
2517                 /* found one! */
2518                 avail = obj_priv->fence_reg;
2519                 break;
2520         }
2521
2522         BUG_ON(avail == I915_FENCE_REG_NONE);
2523
2524         /* We only have a reference on obj from the active list. put_fence_reg
2525          * might drop that one, causing a use-after-free in it. So hold a
2526          * private reference to obj like the other callers of put_fence_reg
2527          * (set_tiling ioctl) do. */
2528         drm_gem_object_reference(&obj_priv->base);
2529         ret = i915_gem_object_put_fence_reg(&obj_priv->base, interruptible);
2530         drm_gem_object_unreference(&obj_priv->base);
2531         if (ret != 0)
2532                 return ret;
2533
2534         return avail;
2535 }
2536
2537 /**
2538  * i915_gem_object_get_fence_reg - set up a fence reg for an object
2539  * @obj: object to map through a fence reg
2540  *
2541  * When mapping objects through the GTT, userspace wants to be able to write
2542  * to them without having to worry about swizzling if the object is tiled.
2543  *
2544  * This function walks the fence regs looking for a free one for @obj,
2545  * stealing one if it can't find any.
2546  *
2547  * It then sets up the reg based on the object's properties: address, pitch
2548  * and tiling format.
2549  */
2550 int
2551 i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
2552                               bool interruptible)
2553 {
2554         struct drm_device *dev = obj->dev;
2555         struct drm_i915_private *dev_priv = dev->dev_private;
2556         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2557         struct drm_i915_fence_reg *reg = NULL;
2558         int ret;
2559
2560         /* Just update our place in the LRU if our fence is getting used. */
2561         if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
2562                 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2563                 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2564                 return 0;
2565         }
2566
2567         switch (obj_priv->tiling_mode) {
2568         case I915_TILING_NONE:
2569                 WARN(1, "allocating a fence for non-tiled object?\n");
2570                 break;
2571         case I915_TILING_X:
2572                 if (!obj_priv->stride)
2573                         return -EINVAL;
2574                 WARN((obj_priv->stride & (512 - 1)),
2575                      "object 0x%08x is X tiled but has non-512B pitch\n",
2576                      obj_priv->gtt_offset);
2577                 break;
2578         case I915_TILING_Y:
2579                 if (!obj_priv->stride)
2580                         return -EINVAL;
2581                 WARN((obj_priv->stride & (128 - 1)),
2582                      "object 0x%08x is Y tiled but has non-128B pitch\n",
2583                      obj_priv->gtt_offset);
2584                 break;
2585         }
2586
2587         ret = i915_find_fence_reg(dev, interruptible);
2588         if (ret < 0)
2589                 return ret;
2590
2591         obj_priv->fence_reg = ret;
2592         reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2593         list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2594
2595         reg->obj = obj;
2596
2597         switch (INTEL_INFO(dev)->gen) {
2598         case 6:
2599                 sandybridge_write_fence_reg(obj);
2600                 break;
2601         case 5:
2602         case 4:
2603                 i965_write_fence_reg(obj);
2604                 break;
2605         case 3:
2606                 i915_write_fence_reg(obj);
2607                 break;
2608         case 2:
2609                 i830_write_fence_reg(obj);
2610                 break;
2611         }
2612
2613         trace_i915_gem_object_get_fence(obj,
2614                                         obj_priv->fence_reg,
2615                                         obj_priv->tiling_mode);
2616
2617         return 0;
2618 }
2619
2620 /**
2621  * i915_gem_clear_fence_reg - clear out fence register info
2622  * @obj: object to clear
2623  *
2624  * Zeroes out the fence register itself and clears out the associated
2625  * data structures in dev_priv and obj_priv.
2626  */
2627 static void
2628 i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2629 {
2630         struct drm_device *dev = obj->dev;
2631         drm_i915_private_t *dev_priv = dev->dev_private;
2632         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2633         struct drm_i915_fence_reg *reg =
2634                 &dev_priv->fence_regs[obj_priv->fence_reg];
2635         uint32_t fence_reg;
2636
2637         switch (INTEL_INFO(dev)->gen) {
2638         case 6:
2639                 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2640                              (obj_priv->fence_reg * 8), 0);
2641                 break;
2642         case 5:
2643         case 4:
2644                 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2645                 break;
2646         case 3:
2647                 if (obj_priv->fence_reg >= 8)
2648                         fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
2649                 else
2650         case 2:
2651                         fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2652
2653                 I915_WRITE(fence_reg, 0);
2654                 break;
2655         }
2656
2657         reg->obj = NULL;
2658         obj_priv->fence_reg = I915_FENCE_REG_NONE;
2659         list_del_init(&reg->lru_list);
2660 }
2661
2662 /**
2663  * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2664  * to the buffer to finish, and then resets the fence register.
2665  * @obj: tiled object holding a fence register.
2666  * @bool: whether the wait upon the fence is interruptible
2667  *
2668  * Zeroes out the fence register itself and clears out the associated
2669  * data structures in dev_priv and obj_priv.
2670  */
2671 int
2672 i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
2673                               bool interruptible)
2674 {
2675         struct drm_device *dev = obj->dev;
2676         struct drm_i915_private *dev_priv = dev->dev_private;
2677         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2678         struct drm_i915_fence_reg *reg;
2679
2680         if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2681                 return 0;
2682
2683         /* If we've changed tiling, GTT-mappings of the object
2684          * need to re-fault to ensure that the correct fence register
2685          * setup is in place.
2686          */
2687         i915_gem_release_mmap(obj);
2688
2689         /* On the i915, GPU access to tiled buffers is via a fence,
2690          * therefore we must wait for any outstanding access to complete
2691          * before clearing the fence.
2692          */
2693         reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2694         if (reg->gpu) {
2695                 int ret;
2696
2697                 ret = i915_gem_object_flush_gpu_write_domain(obj, true);
2698                 if (ret)
2699                         return ret;
2700
2701                 ret = i915_gem_object_wait_rendering(obj, interruptible);
2702                 if (ret)
2703                         return ret;
2704
2705                 reg->gpu = false;
2706         }
2707
2708         i915_gem_object_flush_gtt_write_domain(obj);
2709         i915_gem_clear_fence_reg(obj);
2710
2711         return 0;
2712 }
2713
2714 /**
2715  * Finds free space in the GTT aperture and binds the object there.
2716  */
2717 static int
2718 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
2719                             unsigned alignment,
2720                             bool map_and_fenceable)
2721 {
2722         struct drm_device *dev = obj->dev;
2723         drm_i915_private_t *dev_priv = dev->dev_private;
2724         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2725         struct drm_mm_node *free_space;
2726         gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2727         u32 size, fence_size, fence_alignment, unfenced_alignment;
2728         bool mappable, fenceable;
2729         int ret;
2730
2731         if (obj_priv->madv != I915_MADV_WILLNEED) {
2732                 DRM_ERROR("Attempting to bind a purgeable object\n");
2733                 return -EINVAL;
2734         }
2735
2736         fence_size = i915_gem_get_gtt_size(obj_priv);
2737         fence_alignment = i915_gem_get_gtt_alignment(obj_priv);
2738         unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj_priv);
2739
2740         if (alignment == 0)
2741                 alignment = map_and_fenceable ? fence_alignment :
2742                                                 unfenced_alignment;
2743         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2744                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2745                 return -EINVAL;
2746         }
2747
2748         size = map_and_fenceable ? fence_size : obj->size;
2749
2750         /* If the object is bigger than the entire aperture, reject it early
2751          * before evicting everything in a vain attempt to find space.
2752          */
2753         if (obj->size >
2754             (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2755                 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2756                 return -E2BIG;
2757         }
2758
2759  search_free:
2760         if (map_and_fenceable)
2761                 free_space =
2762                         drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
2763                                                     size, alignment, 0,
2764                                                     dev_priv->mm.gtt_mappable_end,
2765                                                     0);
2766         else
2767                 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2768                                                 size, alignment, 0);
2769
2770         if (free_space != NULL) {
2771                 if (map_and_fenceable)
2772                         obj_priv->gtt_space =
2773                                 drm_mm_get_block_range_generic(free_space,
2774                                                                size, alignment, 0,
2775                                                                dev_priv->mm.gtt_mappable_end,
2776                                                                0);
2777                 else
2778                         obj_priv->gtt_space =
2779                                 drm_mm_get_block(free_space, size, alignment);
2780         }
2781         if (obj_priv->gtt_space == NULL) {
2782                 /* If the gtt is empty and we're still having trouble
2783                  * fitting our object in, we're out of memory.
2784                  */
2785                 ret = i915_gem_evict_something(dev, size, alignment,
2786                                                map_and_fenceable);
2787                 if (ret)
2788                         return ret;
2789
2790                 goto search_free;
2791         }
2792
2793         ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
2794         if (ret) {
2795                 drm_mm_put_block(obj_priv->gtt_space);
2796                 obj_priv->gtt_space = NULL;
2797
2798                 if (ret == -ENOMEM) {
2799                         /* first try to clear up some space from the GTT */
2800                         ret = i915_gem_evict_something(dev, size,
2801                                                        alignment,
2802                                                        map_and_fenceable);
2803                         if (ret) {
2804                                 /* now try to shrink everyone else */
2805                                 if (gfpmask) {
2806                                         gfpmask = 0;
2807                                         goto search_free;
2808                                 }
2809
2810                                 return ret;
2811                         }
2812
2813                         goto search_free;
2814                 }
2815
2816                 return ret;
2817         }
2818
2819         /* Create an AGP memory structure pointing at our pages, and bind it
2820          * into the GTT.
2821          */
2822         obj_priv->agp_mem = drm_agp_bind_pages(dev,
2823                                                obj_priv->pages,
2824                                                obj->size >> PAGE_SHIFT,
2825                                                obj_priv->gtt_space->start,
2826                                                obj_priv->agp_type);
2827         if (obj_priv->agp_mem == NULL) {
2828                 i915_gem_object_put_pages_gtt(obj);
2829                 drm_mm_put_block(obj_priv->gtt_space);
2830                 obj_priv->gtt_space = NULL;
2831
2832                 ret = i915_gem_evict_something(dev, size,
2833                                                alignment, map_and_fenceable);
2834                 if (ret)
2835                         return ret;
2836
2837                 goto search_free;
2838         }
2839
2840         obj_priv->gtt_offset = obj_priv->gtt_space->start;
2841
2842         /* keep track of bounds object by adding it to the inactive list */
2843         list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
2844         i915_gem_info_add_gtt(dev_priv, obj_priv);
2845
2846         /* Assert that the object is not currently in any GPU domain. As it
2847          * wasn't in the GTT, there shouldn't be any way it could have been in
2848          * a GPU cache
2849          */
2850         BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2851         BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2852
2853         trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, map_and_fenceable);
2854
2855         fenceable =
2856                 obj_priv->gtt_space->size == fence_size &&
2857                 (obj_priv->gtt_space->start & (fence_alignment -1)) == 0;
2858
2859         mappable =
2860                 obj_priv->gtt_offset + obj->size <= dev_priv->mm.gtt_mappable_end;
2861
2862         obj_priv->map_and_fenceable = mappable && fenceable;
2863
2864         return 0;
2865 }
2866
2867 void
2868 i915_gem_clflush_object(struct drm_gem_object *obj)
2869 {
2870         struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
2871
2872         /* If we don't have a page list set up, then we're not pinned
2873          * to GPU, and we can ignore the cache flush because it'll happen
2874          * again at bind time.
2875          */
2876         if (obj_priv->pages == NULL)
2877                 return;
2878
2879         trace_i915_gem_object_clflush(obj);
2880
2881         drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
2882 }
2883
2884 /** Flushes any GPU write domain for the object if it's dirty. */
2885 static int
2886 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
2887                                        bool pipelined)
2888 {
2889         struct drm_device *dev = obj->dev;
2890
2891         if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2892                 return 0;
2893
2894         /* Queue the GPU write cache flushing we need. */
2895         i915_gem_flush_ring(dev, NULL,
2896                             to_intel_bo(obj)->ring,
2897                             0, obj->write_domain);
2898         BUG_ON(obj->write_domain);
2899
2900         if (pipelined)
2901                 return 0;
2902
2903         return i915_gem_object_wait_rendering(obj, true);
2904 }
2905
2906 /** Flushes the GTT write domain for the object if it's dirty. */
2907 static void
2908 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2909 {
2910         uint32_t old_write_domain;
2911
2912         if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2913                 return;
2914
2915         /* No actual flushing is required for the GTT write domain.   Writes
2916          * to it immediately go to main memory as far as we know, so there's
2917          * no chipset flush.  It also doesn't land in render cache.
2918          */
2919         i915_gem_release_mmap(obj);
2920
2921         old_write_domain = obj->write_domain;
2922         obj->write_domain = 0;
2923
2924         trace_i915_gem_object_change_domain(obj,
2925                                             obj->read_domains,
2926                                             old_write_domain);
2927 }
2928
2929 /** Flushes the CPU write domain for the object if it's dirty. */
2930 static void
2931 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2932 {
2933         struct drm_device *dev = obj->dev;
2934         uint32_t old_write_domain;
2935
2936         if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2937                 return;
2938
2939         i915_gem_clflush_object(obj);
2940         drm_agp_chipset_flush(dev);
2941         old_write_domain = obj->write_domain;
2942         obj->write_domain = 0;
2943
2944         trace_i915_gem_object_change_domain(obj,
2945                                             obj->read_domains,
2946                                             old_write_domain);
2947 }
2948
2949 /**
2950  * Moves a single object to the GTT read, and possibly write domain.
2951  *
2952  * This function returns when the move is complete, including waiting on
2953  * flushes to occur.
2954  */
2955 int
2956 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2957 {
2958         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2959         uint32_t old_write_domain, old_read_domains;
2960         int ret;
2961
2962         /* Not valid to be called on unbound objects. */
2963         if (obj_priv->gtt_space == NULL)
2964                 return -EINVAL;
2965
2966         ret = i915_gem_object_flush_gpu_write_domain(obj, false);
2967         if (ret != 0)
2968                 return ret;
2969
2970         i915_gem_object_flush_cpu_write_domain(obj);
2971
2972         if (write) {
2973                 ret = i915_gem_object_wait_rendering(obj, true);
2974                 if (ret)
2975                         return ret;
2976         }
2977
2978         old_write_domain = obj->write_domain;
2979         old_read_domains = obj->read_domains;
2980
2981         /* It should now be out of any other write domains, and we can update
2982          * the domain values for our changes.
2983          */
2984         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2985         obj->read_domains |= I915_GEM_DOMAIN_GTT;
2986         if (write) {
2987                 obj->read_domains = I915_GEM_DOMAIN_GTT;
2988                 obj->write_domain = I915_GEM_DOMAIN_GTT;
2989                 obj_priv->dirty = 1;
2990         }
2991
2992         trace_i915_gem_object_change_domain(obj,
2993                                             old_read_domains,
2994                                             old_write_domain);
2995
2996         return 0;
2997 }
2998
2999 /*
3000  * Prepare buffer for display plane. Use uninterruptible for possible flush
3001  * wait, as in modesetting process we're not supposed to be interrupted.
3002  */
3003 int
3004 i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
3005                                      bool pipelined)
3006 {
3007         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3008         uint32_t old_read_domains;
3009         int ret;
3010
3011         /* Not valid to be called on unbound objects. */
3012         if (obj_priv->gtt_space == NULL)
3013                 return -EINVAL;
3014
3015         ret = i915_gem_object_flush_gpu_write_domain(obj, true);
3016         if (ret)
3017                 return ret;
3018
3019         /* Currently, we are always called from an non-interruptible context. */
3020         if (!pipelined) {
3021                 ret = i915_gem_object_wait_rendering(obj, false);
3022                 if (ret)
3023                         return ret;
3024         }
3025
3026         i915_gem_object_flush_cpu_write_domain(obj);
3027
3028         old_read_domains = obj->read_domains;
3029         obj->read_domains |= I915_GEM_DOMAIN_GTT;
3030
3031         trace_i915_gem_object_change_domain(obj,
3032                                             old_read_domains,
3033                                             obj->write_domain);
3034
3035         return 0;
3036 }
3037
3038 int
3039 i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
3040                           bool interruptible)
3041 {
3042         if (!obj->active)
3043                 return 0;
3044
3045         if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
3046                 i915_gem_flush_ring(obj->base.dev, NULL, obj->ring,
3047                                     0, obj->base.write_domain);
3048
3049         return i915_gem_object_wait_rendering(&obj->base, interruptible);
3050 }
3051
3052 /**
3053  * Moves a single object to the CPU read, and possibly write domain.
3054  *
3055  * This function returns when the move is complete, including waiting on
3056  * flushes to occur.
3057  */
3058 static int
3059 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
3060 {
3061         uint32_t old_write_domain, old_read_domains;
3062         int ret;
3063
3064         ret = i915_gem_object_flush_gpu_write_domain(obj, false);
3065         if (ret != 0)
3066                 return ret;
3067
3068         i915_gem_object_flush_gtt_write_domain(obj);
3069
3070         /* If we have a partially-valid cache of the object in the CPU,
3071          * finish invalidating it and free the per-page flags.
3072          */
3073         i915_gem_object_set_to_full_cpu_read_domain(obj);
3074
3075         if (write) {
3076                 ret = i915_gem_object_wait_rendering(obj, true);
3077                 if (ret)
3078                         return ret;
3079         }
3080
3081         old_write_domain = obj->write_domain;
3082         old_read_domains = obj->read_domains;
3083
3084         /* Flush the CPU cache if it's still invalid. */
3085         if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3086                 i915_gem_clflush_object(obj);
3087
3088                 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3089         }
3090
3091         /* It should now be out of any other write domains, and we can update
3092          * the domain values for our changes.
3093          */
3094         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3095
3096         /* If we're writing through the CPU, then the GPU read domains will
3097          * need to be invalidated at next use.
3098          */
3099         if (write) {
3100                 obj->read_domains = I915_GEM_DOMAIN_CPU;
3101                 obj->write_domain = I915_GEM_DOMAIN_CPU;
3102         }
3103
3104         trace_i915_gem_object_change_domain(obj,
3105                                             old_read_domains,
3106                                             old_write_domain);
3107
3108         return 0;
3109 }
3110
3111 /*
3112  * Set the next domain for the specified object. This
3113  * may not actually perform the necessary flushing/invaliding though,
3114  * as that may want to be batched with other set_domain operations
3115  *
3116  * This is (we hope) the only really tricky part of gem. The goal
3117  * is fairly simple -- track which caches hold bits of the object
3118  * and make sure they remain coherent. A few concrete examples may
3119  * help to explain how it works. For shorthand, we use the notation
3120  * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
3121  * a pair of read and write domain masks.
3122  *
3123  * Case 1: the batch buffer
3124  *
3125  *      1. Allocated
3126  *      2. Written by CPU
3127  *      3. Mapped to GTT
3128  *      4. Read by GPU
3129  *      5. Unmapped from GTT
3130  *      6. Freed
3131  *
3132  *      Let's take these a step at a time
3133  *
3134  *      1. Allocated
3135  *              Pages allocated from the kernel may still have
3136  *              cache contents, so we set them to (CPU, CPU) always.
3137  *      2. Written by CPU (using pwrite)
3138  *              The pwrite function calls set_domain (CPU, CPU) and
3139  *              this function does nothing (as nothing changes)
3140  *      3. Mapped by GTT
3141  *              This function asserts that the object is not
3142  *              currently in any GPU-based read or write domains
3143  *      4. Read by GPU
3144  *              i915_gem_execbuffer calls set_domain (COMMAND, 0).
3145  *              As write_domain is zero, this function adds in the
3146  *              current read domains (CPU+COMMAND, 0).
3147  *              flush_domains is set to CPU.
3148  *              invalidate_domains is set to COMMAND
3149  *              clflush is run to get data out of the CPU caches
3150  *              then i915_dev_set_domain calls i915_gem_flush to
3151  *              emit an MI_FLUSH and drm_agp_chipset_flush
3152  *      5. Unmapped from GTT
3153  *              i915_gem_object_unbind calls set_domain (CPU, CPU)
3154  *              flush_domains and invalidate_domains end up both zero
3155  *              so no flushing/invalidating happens
3156  *      6. Freed
3157  *              yay, done
3158  *
3159  * Case 2: The shared render buffer
3160  *
3161  *      1. Allocated
3162  *      2. Mapped to GTT
3163  *      3. Read/written by GPU
3164  *      4. set_domain to (CPU,CPU)
3165  *      5. Read/written by CPU
3166  *      6. Read/written by GPU
3167  *
3168  *      1. Allocated
3169  *              Same as last example, (CPU, CPU)
3170  *      2. Mapped to GTT
3171  *              Nothing changes (assertions find that it is not in the GPU)
3172  *      3. Read/written by GPU
3173  *              execbuffer calls set_domain (RENDER, RENDER)
3174  *              flush_domains gets CPU
3175  *              invalidate_domains gets GPU
3176  *              clflush (obj)
3177  *              MI_FLUSH and drm_agp_chipset_flush
3178  *      4. set_domain (CPU, CPU)
3179  *              flush_domains gets GPU
3180  *              invalidate_domains gets CPU
3181  *              wait_rendering (obj) to make sure all drawing is complete.
3182  *              This will include an MI_FLUSH to get the data from GPU
3183  *              to memory
3184  *              clflush (obj) to invalidate the CPU cache
3185  *              Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
3186  *      5. Read/written by CPU
3187  *              cache lines are loaded and dirtied
3188  *      6. Read written by GPU
3189  *              Same as last GPU access
3190  *
3191  * Case 3: The constant buffer
3192  *
3193  *      1. Allocated
3194  *      2. Written by CPU
3195  *      3. Read by GPU
3196  *      4. Updated (written) by CPU again
3197  *      5. Read by GPU
3198  *
3199  *      1. Allocated
3200  *              (CPU, CPU)
3201  *      2. Written by CPU
3202  *              (CPU, CPU)
3203  *      3. Read by GPU
3204  *              (CPU+RENDER, 0)
3205  *              flush_domains = CPU
3206  *              invalidate_domains = RENDER
3207  *              clflush (obj)
3208  *              MI_FLUSH
3209  *              drm_agp_chipset_flush
3210  *      4. Updated (written) by CPU again
3211  *              (CPU, CPU)
3212  *              flush_domains = 0 (no previous write domain)
3213  *              invalidate_domains = 0 (no new read domains)
3214  *      5. Read by GPU
3215  *              (CPU+RENDER, 0)
3216  *              flush_domains = CPU
3217  *              invalidate_domains = RENDER
3218  *              clflush (obj)
3219  *              MI_FLUSH
3220  *              drm_agp_chipset_flush
3221  */
3222 static void
3223 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
3224                                   struct intel_ring_buffer *ring,
3225                                   struct change_domains *cd)
3226 {
3227         struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
3228         uint32_t                        invalidate_domains = 0;
3229         uint32_t                        flush_domains = 0;
3230
3231         /*
3232          * If the object isn't moving to a new write domain,
3233          * let the object stay in multiple read domains
3234          */
3235         if (obj->pending_write_domain == 0)
3236                 obj->pending_read_domains |= obj->read_domains;
3237
3238         /*
3239          * Flush the current write domain if
3240          * the new read domains don't match. Invalidate
3241          * any read domains which differ from the old
3242          * write domain
3243          */
3244         if (obj->write_domain &&
3245             (obj->write_domain != obj->pending_read_domains ||
3246              obj_priv->ring != ring)) {
3247                 flush_domains |= obj->write_domain;
3248                 invalidate_domains |=
3249                         obj->pending_read_domains & ~obj->write_domain;
3250         }
3251         /*
3252          * Invalidate any read caches which may have
3253          * stale data. That is, any new read domains.
3254          */
3255         invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
3256         if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
3257                 i915_gem_clflush_object(obj);
3258
3259         /* blow away mappings if mapped through GTT */
3260         if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
3261                 i915_gem_release_mmap(obj);
3262
3263         /* The actual obj->write_domain will be updated with
3264          * pending_write_domain after we emit the accumulated flush for all
3265          * of our domain changes in execbuffers (which clears objects'
3266          * write_domains).  So if we have a current write domain that we
3267          * aren't changing, set pending_write_domain to that.
3268          */
3269         if (flush_domains == 0 && obj->pending_write_domain == 0)
3270                 obj->pending_write_domain = obj->write_domain;
3271
3272         cd->invalidate_domains |= invalidate_domains;
3273         cd->flush_domains |= flush_domains;
3274         if (flush_domains & I915_GEM_GPU_DOMAINS)
3275                 cd->flush_rings |= obj_priv->ring->id;
3276         if (invalidate_domains & I915_GEM_GPU_DOMAINS)
3277                 cd->flush_rings |= ring->id;
3278 }
3279
3280 /**
3281  * Moves the object from a partially CPU read to a full one.
3282  *
3283  * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3284  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3285  */
3286 static void
3287 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3288 {
3289         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3290
3291         if (!obj_priv->page_cpu_valid)
3292                 return;
3293
3294         /* If we're partially in the CPU read domain, finish moving it in.
3295          */
3296         if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
3297                 int i;
3298
3299                 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
3300                         if (obj_priv->page_cpu_valid[i])
3301                                 continue;
3302                         drm_clflush_pages(obj_priv->pages + i, 1);
3303                 }
3304         }
3305
3306         /* Free the page_cpu_valid mappings which are now stale, whether
3307          * or not we've got I915_GEM_DOMAIN_CPU.
3308          */
3309         kfree(obj_priv->page_cpu_valid);
3310         obj_priv->page_cpu_valid = NULL;
3311 }
3312
3313 /**
3314  * Set the CPU read domain on a range of the object.
3315  *
3316  * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3317  * not entirely valid.  The page_cpu_valid member of the object flags which
3318  * pages have been flushed, and will be respected by
3319  * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3320  * of the whole object.
3321  *
3322  * This function returns when the move is complete, including waiting on
3323  * flushes to occur.
3324  */
3325 static int
3326 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3327                                           uint64_t offset, uint64_t size)
3328 {
3329         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3330         uint32_t old_read_domains;
3331         int i, ret;
3332
3333         if (offset == 0 && size == obj->size)
3334                 return i915_gem_object_set_to_cpu_domain(obj, 0);
3335
3336         ret = i915_gem_object_flush_gpu_write_domain(obj, false);
3337         if (ret != 0)
3338                 return ret;
3339         i915_gem_object_flush_gtt_write_domain(obj);
3340
3341         /* If we're already fully in the CPU read domain, we're done. */
3342         if (obj_priv->page_cpu_valid == NULL &&
3343             (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
3344                 return 0;
3345
3346         /* Otherwise, create/clear the per-page CPU read domain flag if we're
3347          * newly adding I915_GEM_DOMAIN_CPU
3348          */
3349         if (obj_priv->page_cpu_valid == NULL) {
3350                 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
3351                                                    GFP_KERNEL);
3352                 if (obj_priv->page_cpu_valid == NULL)
3353                         return -ENOMEM;
3354         } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
3355                 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
3356
3357         /* Flush the cache on any pages that are still invalid from the CPU's
3358          * perspective.
3359          */
3360         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3361              i++) {
3362                 if (obj_priv->page_cpu_valid[i])
3363                         continue;
3364
3365                 drm_clflush_pages(obj_priv->pages + i, 1);
3366
3367                 obj_priv->page_cpu_valid[i] = 1;
3368         }
3369
3370         /* It should now be out of any other write domains, and we can update
3371          * the domain values for our changes.
3372          */
3373         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3374
3375         old_read_domains = obj->read_domains;
3376         obj->read_domains |= I915_GEM_DOMAIN_CPU;
3377
3378         trace_i915_gem_object_change_domain(obj,
3379                                             old_read_domains,
3380                                             obj->write_domain);
3381
3382         return 0;
3383 }
3384
3385 /**
3386  * Pin an object to the GTT and evaluate the relocations landing in it.
3387  */
3388 static int
3389 i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
3390                              struct drm_file *file_priv,
3391                              struct drm_i915_gem_exec_object2 *entry)
3392 {
3393         struct drm_device *dev = obj->base.dev;
3394         drm_i915_private_t *dev_priv = dev->dev_private;
3395         struct drm_i915_gem_relocation_entry __user *user_relocs;
3396         struct drm_gem_object *target_obj = NULL;
3397         uint32_t target_handle = 0;
3398         int i, ret = 0;
3399
3400         user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
3401         for (i = 0; i < entry->relocation_count; i++) {
3402                 struct drm_i915_gem_relocation_entry reloc;
3403                 uint32_t target_offset;
3404
3405                 if (__copy_from_user_inatomic(&reloc,
3406                                               user_relocs+i,
3407                                               sizeof(reloc))) {
3408                         ret = -EFAULT;
3409                         break;
3410                 }
3411
3412                 if (reloc.target_handle != target_handle) {
3413                         drm_gem_object_unreference(target_obj);
3414
3415                         target_obj = drm_gem_object_lookup(dev, file_priv,
3416                                                            reloc.target_handle);
3417                         if (target_obj == NULL) {
3418                                 ret = -ENOENT;
3419                                 break;
3420                         }
3421
3422                         target_handle = reloc.target_handle;
3423                 }
3424                 target_offset = to_intel_bo(target_obj)->gtt_offset;
3425
3426 #if WATCH_RELOC
3427                 DRM_INFO("%s: obj %p offset %08x target %d "
3428                          "read %08x write %08x gtt %08x "
3429                          "presumed %08x delta %08x\n",
3430                          __func__,
3431                          obj,
3432                          (int) reloc.offset,
3433                          (int) reloc.target_handle,
3434                          (int) reloc.read_domains,
3435                          (int) reloc.write_domain,
3436                          (int) target_offset,
3437                          (int) reloc.presumed_offset,
3438                          reloc.delta);
3439 #endif
3440
3441                 /* The target buffer should have appeared before us in the
3442                  * exec_object list, so it should have a GTT space bound by now.
3443                  */
3444                 if (target_offset == 0) {
3445                         DRM_ERROR("No GTT space found for object %d\n",
3446                                   reloc.target_handle);
3447                         ret = -EINVAL;
3448                         break;
3449                 }
3450
3451                 /* Validate that the target is in a valid r/w GPU domain */
3452                 if (reloc.write_domain & (reloc.write_domain - 1)) {
3453                         DRM_ERROR("reloc with multiple write domains: "
3454                                   "obj %p target %d offset %d "
3455                                   "read %08x write %08x",
3456                                   obj, reloc.target_handle,
3457                                   (int) reloc.offset,
3458                                   reloc.read_domains,
3459                                   reloc.write_domain);
3460                         ret = -EINVAL;
3461                         break;
3462                 }
3463                 if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
3464                     reloc.read_domains & I915_GEM_DOMAIN_CPU) {
3465                         DRM_ERROR("reloc with read/write CPU domains: "
3466                                   "obj %p target %d offset %d "
3467                                   "read %08x write %08x",
3468                                   obj, reloc.target_handle,
3469                                   (int) reloc.offset,
3470                                   reloc.read_domains,
3471                                   reloc.write_domain);
3472                         ret = -EINVAL;
3473                         break;
3474                 }
3475                 if (reloc.write_domain && target_obj->pending_write_domain &&
3476                     reloc.write_domain != target_obj->pending_write_domain) {
3477                         DRM_ERROR("Write domain conflict: "
3478                                   "obj %p target %d offset %d "
3479                                   "new %08x old %08x\n",
3480                                   obj, reloc.target_handle,
3481                                   (int) reloc.offset,
3482                                   reloc.write_domain,
3483                                   target_obj->pending_write_domain);
3484                         ret = -EINVAL;
3485                         break;
3486                 }
3487
3488                 target_obj->pending_read_domains |= reloc.read_domains;
3489                 target_obj->pending_write_domain |= reloc.write_domain;
3490
3491                 /* If the relocation already has the right value in it, no
3492                  * more work needs to be done.
3493                  */
3494                 if (target_offset == reloc.presumed_offset)
3495                         continue;
3496
3497                 /* Check that the relocation address is valid... */
3498                 if (reloc.offset > obj->base.size - 4) {
3499                         DRM_ERROR("Relocation beyond object bounds: "
3500                                   "obj %p target %d offset %d size %d.\n",
3501                                   obj, reloc.target_handle,
3502                                   (int) reloc.offset, (int) obj->base.size);
3503                         ret = -EINVAL;
3504                         break;
3505                 }
3506                 if (reloc.offset & 3) {
3507                         DRM_ERROR("Relocation not 4-byte aligned: "
3508                                   "obj %p target %d offset %d.\n",
3509                                   obj, reloc.target_handle,
3510                                   (int) reloc.offset);
3511                         ret = -EINVAL;
3512                         break;
3513                 }
3514
3515                 /* and points to somewhere within the target object. */
3516                 if (reloc.delta >= target_obj->size) {
3517                         DRM_ERROR("Relocation beyond target object bounds: "
3518                                   "obj %p target %d delta %d size %d.\n",
3519                                   obj, reloc.target_handle,
3520                                   (int) reloc.delta, (int) target_obj->size);
3521                         ret = -EINVAL;
3522                         break;
3523                 }
3524
3525                 reloc.delta += target_offset;
3526                 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
3527                         uint32_t page_offset = reloc.offset & ~PAGE_MASK;
3528                         char *vaddr;
3529
3530                         vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
3531                         *(uint32_t *)(vaddr + page_offset) = reloc.delta;
3532                         kunmap_atomic(vaddr);
3533                 } else {
3534                         uint32_t __iomem *reloc_entry;
3535                         void __iomem *reloc_page;
3536
3537                         ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
3538                         if (ret)
3539                                 break;
3540
3541                         /* Map the page containing the relocation we're going to perform.  */
3542                         reloc.offset += obj->gtt_offset;
3543                         reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3544                                                               reloc.offset & PAGE_MASK);
3545                         reloc_entry = (uint32_t __iomem *)
3546                                 (reloc_page + (reloc.offset & ~PAGE_MASK));
3547                         iowrite32(reloc.delta, reloc_entry);
3548                         io_mapping_unmap_atomic(reloc_page);
3549                 }
3550
3551                 /* and update the user's relocation entry */
3552                 reloc.presumed_offset = target_offset;
3553                 if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
3554                                               &reloc.presumed_offset,
3555                                               sizeof(reloc.presumed_offset))) {
3556                     ret = -EFAULT;
3557                     break;
3558                 }
3559         }
3560
3561         drm_gem_object_unreference(target_obj);
3562         return ret;
3563 }
3564
3565 static int
3566 i915_gem_execbuffer_pin(struct drm_device *dev,
3567                         struct drm_file *file,
3568                         struct drm_gem_object **object_list,
3569                         struct drm_i915_gem_exec_object2 *exec_list,
3570                         int count)
3571 {
3572         struct drm_i915_private *dev_priv = dev->dev_private;
3573         int ret, i, retry;
3574
3575         /* attempt to pin all of the buffers into the GTT */
3576         retry = 0;
3577         do {
3578                 ret = 0;
3579                 for (i = 0; i < count; i++) {
3580                         struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
3581                         struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
3582                         bool need_fence =
3583                                 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3584                                 obj->tiling_mode != I915_TILING_NONE;
3585
3586                         /* g33/pnv can't fence buffers in the unmappable part */
3587                         bool need_mappable =
3588                                 entry->relocation_count ? true : need_fence;
3589
3590                         /* Check fence reg constraints and rebind if necessary */
3591                         if (need_mappable && !obj->map_and_fenceable) {
3592                                 ret = i915_gem_object_unbind(&obj->base);
3593                                 if (ret)
3594                                         break;
3595                         }
3596
3597                         ret = i915_gem_object_pin(&obj->base,
3598                                                   entry->alignment,
3599                                                   need_mappable);
3600                         if (ret)
3601                                 break;
3602
3603                         /*
3604                          * Pre-965 chips need a fence register set up in order
3605                          * to properly handle blits to/from tiled surfaces.
3606                          */
3607                         if (need_fence) {
3608                                 ret = i915_gem_object_get_fence_reg(&obj->base, true);
3609                                 if (ret) {
3610                                         i915_gem_object_unpin(&obj->base);
3611                                         break;
3612                                 }
3613
3614                                 dev_priv->fence_regs[obj->fence_reg].gpu = true;
3615                         }
3616
3617                         entry->offset = obj->gtt_offset;
3618                 }
3619
3620                 while (i--)
3621                         i915_gem_object_unpin(object_list[i]);
3622
3623                 if (ret != -ENOSPC || retry > 1)
3624                         return ret;
3625
3626                 /* First attempt, just clear anything that is purgeable.
3627                  * Second attempt, clear the entire GTT.
3628                  */
3629                 ret = i915_gem_evict_everything(dev, retry == 0);
3630                 if (ret)
3631                         return ret;
3632
3633                 retry++;
3634         } while (1);
3635 }
3636
3637 static int
3638 i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
3639                                 struct drm_file *file,
3640                                 struct intel_ring_buffer *ring,
3641                                 struct drm_gem_object **objects,
3642                                 int count)
3643 {
3644         struct change_domains cd;
3645         int ret, i;
3646
3647         cd.invalidate_domains = 0;
3648         cd.flush_domains = 0;
3649         cd.flush_rings = 0;
3650         for (i = 0; i < count; i++)
3651                 i915_gem_object_set_to_gpu_domain(objects[i], ring, &cd);
3652
3653         if (cd.invalidate_domains | cd.flush_domains) {
3654 #if WATCH_EXEC
3655                 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3656                           __func__,
3657                          cd.invalidate_domains,
3658                          cd.flush_domains);
3659 #endif
3660                 i915_gem_flush(dev, file,
3661                                cd.invalidate_domains,
3662                                cd.flush_domains,
3663                                cd.flush_rings);
3664         }
3665
3666         for (i = 0; i < count; i++) {
3667                 struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
3668                 /* XXX replace with semaphores */
3669                 if (obj->ring && ring != obj->ring) {
3670                         ret = i915_gem_object_wait_rendering(&obj->base, true);
3671                         if (ret)
3672                                 return ret;
3673                 }
3674         }
3675
3676         return 0;
3677 }
3678
3679 /* Throttle our rendering by waiting until the ring has completed our requests
3680  * emitted over 20 msec ago.
3681  *
3682  * Note that if we were to use the current jiffies each time around the loop,
3683  * we wouldn't escape the function with any frames outstanding if the time to
3684  * render a frame was over 20ms.
3685  *
3686  * This should get us reasonable parallelism between CPU and GPU but also
3687  * relatively low latency when blocking on a particular request to finish.
3688  */
3689 static int
3690 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3691 {
3692         struct drm_i915_private *dev_priv = dev->dev_private;
3693         struct drm_i915_file_private *file_priv = file->driver_priv;
3694         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3695         struct drm_i915_gem_request *request;
3696         struct intel_ring_buffer *ring = NULL;
3697         u32 seqno = 0;
3698         int ret;
3699
3700         spin_lock(&file_priv->mm.lock);
3701         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3702                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3703                         break;
3704
3705                 ring = request->ring;
3706                 seqno = request->seqno;
3707         }
3708         spin_unlock(&file_priv->mm.lock);
3709
3710         if (seqno == 0)
3711                 return 0;
3712
3713         ret = 0;
3714         if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
3715                 /* And wait for the seqno passing without holding any locks and
3716                  * causing extra latency for others. This is safe as the irq
3717                  * generation is designed to be run atomically and so is
3718                  * lockless.
3719                  */
3720                 ring->user_irq_get(ring);
3721                 ret = wait_event_interruptible(ring->irq_queue,
3722                                                i915_seqno_passed(ring->get_seqno(ring), seqno)
3723                                                || atomic_read(&dev_priv->mm.wedged));
3724                 ring->user_irq_put(ring);
3725
3726                 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3727                         ret = -EIO;
3728         }
3729
3730         if (ret == 0)
3731                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3732
3733         return ret;
3734 }
3735
3736 static int
3737 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
3738                           uint64_t exec_offset)
3739 {
3740         uint32_t exec_start, exec_len;
3741
3742         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3743         exec_len = (uint32_t) exec->batch_len;
3744
3745         if ((exec_start | exec_len) & 0x7)
3746                 return -EINVAL;
3747
3748         if (!exec_start)
3749                 return -EINVAL;
3750
3751         return 0;
3752 }
3753
3754 static int
3755 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
3756                    int count)
3757 {
3758         int i;
3759
3760         for (i = 0; i < count; i++) {
3761                 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
3762                 size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
3763
3764                 if (!access_ok(VERIFY_READ, ptr, length))
3765                         return -EFAULT;
3766
3767                 /* we may also need to update the presumed offsets */
3768                 if (!access_ok(VERIFY_WRITE, ptr, length))
3769                         return -EFAULT;
3770
3771                 if (fault_in_pages_readable(ptr, length))
3772                         return -EFAULT;
3773         }
3774
3775         return 0;
3776 }
3777
3778 static int
3779 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3780                        struct drm_file *file,
3781                        struct drm_i915_gem_execbuffer2 *args,
3782                        struct drm_i915_gem_exec_object2 *exec_list)
3783 {
3784         drm_i915_private_t *dev_priv = dev->dev_private;
3785         struct drm_gem_object **object_list = NULL;
3786         struct drm_gem_object *batch_obj;
3787         struct drm_clip_rect *cliprects = NULL;
3788         struct drm_i915_gem_request *request = NULL;
3789         int ret, i, flips;
3790         uint64_t exec_offset;
3791
3792         struct intel_ring_buffer *ring = NULL;
3793
3794         ret = i915_gem_check_is_wedged(dev);
3795         if (ret)
3796                 return ret;
3797
3798         ret = validate_exec_list(exec_list, args->buffer_count);
3799         if (ret)
3800                 return ret;
3801
3802 #if WATCH_EXEC
3803         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3804                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3805 #endif
3806         switch (args->flags & I915_EXEC_RING_MASK) {
3807         case I915_EXEC_DEFAULT:
3808         case I915_EXEC_RENDER:
3809                 ring = &dev_priv->render_ring;
3810                 break;
3811         case I915_EXEC_BSD:
3812                 if (!HAS_BSD(dev)) {
3813                         DRM_ERROR("execbuf with invalid ring (BSD)\n");
3814                         return -EINVAL;
3815                 }
3816                 ring = &dev_priv->bsd_ring;
3817                 break;
3818         case I915_EXEC_BLT:
3819                 if (!HAS_BLT(dev)) {
3820                         DRM_ERROR("execbuf with invalid ring (BLT)\n");
3821                         return -EINVAL;
3822                 }
3823                 ring = &dev_priv->blt_ring;
3824                 break;
3825         default:
3826                 DRM_ERROR("execbuf with unknown ring: %d\n",
3827                           (int)(args->flags & I915_EXEC_RING_MASK));
3828                 return -EINVAL;
3829         }
3830
3831         if (args->buffer_count < 1) {
3832                 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3833                 return -EINVAL;
3834         }
3835         object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
3836         if (object_list == NULL) {
3837                 DRM_ERROR("Failed to allocate object list for %d buffers\n",
3838                           args->buffer_count);
3839                 ret = -ENOMEM;
3840                 goto pre_mutex_err;
3841         }
3842
3843         if (args->num_cliprects != 0) {
3844                 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3845                                     GFP_KERNEL);
3846                 if (cliprects == NULL) {
3847                         ret = -ENOMEM;
3848                         goto pre_mutex_err;
3849                 }
3850
3851                 ret = copy_from_user(cliprects,
3852                                      (struct drm_clip_rect __user *)
3853                                      (uintptr_t) args->cliprects_ptr,
3854                                      sizeof(*cliprects) * args->num_cliprects);
3855                 if (ret != 0) {
3856                         DRM_ERROR("copy %d cliprects failed: %d\n",
3857                                   args->num_cliprects, ret);
3858                         ret = -EFAULT;
3859                         goto pre_mutex_err;
3860                 }
3861         }
3862
3863         request = kzalloc(sizeof(*request), GFP_KERNEL);
3864         if (request == NULL) {
3865                 ret = -ENOMEM;
3866                 goto pre_mutex_err;
3867         }
3868
3869         ret = i915_mutex_lock_interruptible(dev);
3870         if (ret)
3871                 goto pre_mutex_err;
3872
3873         if (dev_priv->mm.suspended) {
3874                 mutex_unlock(&dev->struct_mutex);
3875                 ret = -EBUSY;
3876                 goto pre_mutex_err;
3877         }
3878
3879         /* Look up object handles */
3880         for (i = 0; i < args->buffer_count; i++) {
3881                 struct drm_i915_gem_object *obj_priv;
3882
3883                 object_list[i] = drm_gem_object_lookup(dev, file,
3884                                                        exec_list[i].handle);
3885                 if (object_list[i] == NULL) {
3886                         DRM_ERROR("Invalid object handle %d at index %d\n",
3887                                    exec_list[i].handle, i);
3888                         /* prevent error path from reading uninitialized data */
3889                         args->buffer_count = i + 1;
3890                         ret = -ENOENT;
3891                         goto err;
3892                 }
3893
3894                 obj_priv = to_intel_bo(object_list[i]);
3895                 if (obj_priv->in_execbuffer) {
3896                         DRM_ERROR("Object %p appears more than once in object list\n",
3897                                    object_list[i]);
3898                         /* prevent error path from reading uninitialized data */
3899                         args->buffer_count = i + 1;
3900                         ret = -EINVAL;
3901                         goto err;
3902                 }
3903                 obj_priv->in_execbuffer = true;
3904         }
3905
3906         /* Move the objects en-masse into the GTT, evicting if necessary. */
3907         ret = i915_gem_execbuffer_pin(dev, file,
3908                                       object_list, exec_list,
3909                                       args->buffer_count);
3910         if (ret)
3911                 goto err;
3912
3913         /* The objects are in their final locations, apply the relocations. */
3914         for (i = 0; i < args->buffer_count; i++) {
3915                 struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
3916                 obj->base.pending_read_domains = 0;
3917                 obj->base.pending_write_domain = 0;
3918                 ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
3919                 if (ret)
3920                         goto err;
3921         }
3922
3923         /* Set the pending read domains for the batch buffer to COMMAND */
3924         batch_obj = object_list[args->buffer_count-1];
3925         if (batch_obj->pending_write_domain) {
3926                 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3927                 ret = -EINVAL;
3928                 goto err;
3929         }
3930         batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
3931
3932         /* Sanity check the batch buffer */
3933         exec_offset = to_intel_bo(batch_obj)->gtt_offset;
3934         ret = i915_gem_check_execbuffer(args, exec_offset);
3935         if (ret != 0) {
3936                 DRM_ERROR("execbuf with invalid offset/length\n");
3937                 goto err;
3938         }
3939
3940         ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
3941                                               object_list, args->buffer_count);
3942         if (ret)
3943                 goto err;
3944
3945 #if WATCH_COHERENCY
3946         for (i = 0; i < args->buffer_count; i++) {
3947                 i915_gem_object_check_coherency(object_list[i],
3948                                                 exec_list[i].handle);
3949         }
3950 #endif
3951
3952 #if WATCH_EXEC
3953         i915_gem_dump_object(batch_obj,
3954                               args->batch_len,
3955                               __func__,
3956                               ~0);
3957 #endif
3958
3959         /* Check for any pending flips. As we only maintain a flip queue depth
3960          * of 1, we can simply insert a WAIT for the next display flip prior
3961          * to executing the batch and avoid stalling the CPU.
3962          */
3963         flips = 0;
3964         for (i = 0; i < args->buffer_count; i++) {
3965                 if (object_list[i]->write_domain)
3966                         flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
3967         }
3968         if (flips) {
3969                 int plane, flip_mask;
3970
3971                 for (plane = 0; flips >> plane; plane++) {
3972                         if (((flips >> plane) & 1) == 0)
3973                                 continue;
3974
3975                         if (plane)
3976                                 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
3977                         else
3978                                 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
3979
3980                         ret = intel_ring_begin(ring, 2);
3981                         if (ret)
3982                                 goto err;
3983
3984                         intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
3985                         intel_ring_emit(ring, MI_NOOP);
3986                         intel_ring_advance(ring);
3987                 }
3988         }
3989
3990         /* Exec the batchbuffer */
3991         ret = ring->dispatch_execbuffer(ring, args, cliprects, exec_offset);
3992         if (ret) {
3993                 DRM_ERROR("dispatch failed %d\n", ret);
3994                 goto err;
3995         }
3996
3997         for (i = 0; i < args->buffer_count; i++) {
3998                 struct drm_gem_object *obj = object_list[i];
3999
4000                 obj->read_domains = obj->pending_read_domains;
4001                 obj->write_domain = obj->pending_write_domain;
4002
4003                 i915_gem_object_move_to_active(obj, ring);
4004                 if (obj->write_domain) {
4005                         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4006                         obj_priv->dirty = 1;
4007                         list_move_tail(&obj_priv->gpu_write_list,
4008                                        &ring->gpu_write_list);
4009                         intel_mark_busy(dev, obj);
4010                 }
4011
4012                 trace_i915_gem_object_change_domain(obj,
4013                                                     obj->read_domains,
4014                                                     obj->write_domain);
4015         }
4016
4017         /*
4018          * Ensure that the commands in the batch buffer are
4019          * finished before the interrupt fires
4020          */
4021         i915_retire_commands(dev, ring);
4022
4023         if (i915_add_request(dev, file, request, ring))
4024                 i915_gem_next_request_seqno(dev, ring);
4025         else
4026                 request = NULL;
4027
4028 err:
4029         for (i = 0; i < args->buffer_count; i++) {
4030                 if (object_list[i] == NULL)
4031                     break;
4032
4033                 to_intel_bo(object_list[i])->in_execbuffer = false;
4034                 drm_gem_object_unreference(object_list[i]);
4035         }
4036
4037         mutex_unlock(&dev->struct_mutex);
4038
4039 pre_mutex_err:
4040         drm_free_large(object_list);
4041         kfree(cliprects);
4042         kfree(request);
4043
4044         return ret;
4045 }
4046
4047 /*
4048  * Legacy execbuffer just creates an exec2 list from the original exec object
4049  * list array and passes it to the real function.
4050  */
4051 int
4052 i915_gem_execbuffer(struct drm_device *dev, void *data,
4053                     struct drm_file *file_priv)
4054 {
4055         struct drm_i915_gem_execbuffer *args = data;
4056         struct drm_i915_gem_execbuffer2 exec2;
4057         struct drm_i915_gem_exec_object *exec_list = NULL;
4058         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4059         int ret, i;
4060
4061 #if WATCH_EXEC
4062         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4063                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4064 #endif
4065
4066         if (args->buffer_count < 1) {
4067                 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
4068                 return -EINVAL;
4069         }
4070
4071         /* Copy in the exec list from userland */
4072         exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
4073         exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4074         if (exec_list == NULL || exec2_list == NULL) {
4075                 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4076                           args->buffer_count);
4077                 drm_free_large(exec_list);
4078                 drm_free_large(exec2_list);
4079                 return -ENOMEM;
4080         }
4081         ret = copy_from_user(exec_list,
4082                              (struct drm_i915_relocation_entry __user *)
4083                              (uintptr_t) args->buffers_ptr,
4084                              sizeof(*exec_list) * args->buffer_count);
4085         if (ret != 0) {
4086                 DRM_ERROR("copy %d exec entries failed %d\n",
4087                           args->buffer_count, ret);
4088                 drm_free_large(exec_list);
4089                 drm_free_large(exec2_list);
4090                 return -EFAULT;
4091         }
4092
4093         for (i = 0; i < args->buffer_count; i++) {
4094                 exec2_list[i].handle = exec_list[i].handle;
4095                 exec2_list[i].relocation_count = exec_list[i].relocation_count;
4096                 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
4097                 exec2_list[i].alignment = exec_list[i].alignment;
4098                 exec2_list[i].offset = exec_list[i].offset;
4099                 if (INTEL_INFO(dev)->gen < 4)
4100                         exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
4101                 else
4102                         exec2_list[i].flags = 0;
4103         }
4104
4105         exec2.buffers_ptr = args->buffers_ptr;
4106         exec2.buffer_count = args->buffer_count;
4107         exec2.batch_start_offset = args->batch_start_offset;
4108         exec2.batch_len = args->batch_len;
4109         exec2.DR1 = args->DR1;
4110         exec2.DR4 = args->DR4;
4111         exec2.num_cliprects = args->num_cliprects;
4112         exec2.cliprects_ptr = args->cliprects_ptr;
4113         exec2.flags = I915_EXEC_RENDER;
4114
4115         ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
4116         if (!ret) {
4117                 /* Copy the new buffer offsets back to the user's exec list. */
4118                 for (i = 0; i < args->buffer_count; i++)
4119                         exec_list[i].offset = exec2_list[i].offset;
4120                 /* ... and back out to userspace */
4121                 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4122                                    (uintptr_t) args->buffers_ptr,
4123                                    exec_list,
4124                                    sizeof(*exec_list) * args->buffer_count);
4125                 if (ret) {
4126                         ret = -EFAULT;
4127                         DRM_ERROR("failed to copy %d exec entries "
4128                                   "back to user (%d)\n",
4129                                   args->buffer_count, ret);
4130                 }
4131         }
4132
4133         drm_free_large(exec_list);
4134         drm_free_large(exec2_list);
4135         return ret;
4136 }
4137
4138 int
4139 i915_gem_execbuffer2(struct drm_device *dev, void *data,
4140                      struct drm_file *file_priv)
4141 {
4142         struct drm_i915_gem_execbuffer2 *args = data;
4143         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4144         int ret;
4145
4146 #if WATCH_EXEC
4147         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4148                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4149 #endif
4150
4151         if (args->buffer_count < 1) {
4152                 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
4153                 return -EINVAL;
4154         }
4155
4156         exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4157         if (exec2_list == NULL) {
4158                 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4159                           args->buffer_count);
4160                 return -ENOMEM;
4161         }
4162         ret = copy_from_user(exec2_list,
4163                              (struct drm_i915_relocation_entry __user *)
4164                              (uintptr_t) args->buffers_ptr,
4165                              sizeof(*exec2_list) * args->buffer_count);
4166         if (ret != 0) {
4167                 DRM_ERROR("copy %d exec entries failed %d\n",
4168                           args->buffer_count, ret);
4169                 drm_free_large(exec2_list);
4170                 return -EFAULT;
4171         }
4172
4173         ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4174         if (!ret) {
4175                 /* Copy the new buffer offsets back to the user's exec list. */
4176                 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4177                                    (uintptr_t) args->buffers_ptr,
4178                                    exec2_list,
4179                                    sizeof(*exec2_list) * args->buffer_count);
4180                 if (ret) {
4181                         ret = -EFAULT;
4182                         DRM_ERROR("failed to copy %d exec entries "
4183                                   "back to user (%d)\n",
4184                                   args->buffer_count, ret);
4185                 }
4186         }
4187
4188         drm_free_large(exec2_list);
4189         return ret;
4190 }
4191
4192 int
4193 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
4194                     bool map_and_fenceable)
4195 {
4196         struct drm_device *dev = obj->dev;
4197         struct drm_i915_private *dev_priv = dev->dev_private;
4198         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4199         int ret;
4200
4201         BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
4202         BUG_ON(map_and_fenceable && !map_and_fenceable);
4203         WARN_ON(i915_verify_lists(dev));
4204
4205         if (obj_priv->gtt_space != NULL) {
4206                 if ((alignment && obj_priv->gtt_offset & (alignment - 1)) ||
4207                     (map_and_fenceable && !obj_priv->map_and_fenceable)) {
4208                         WARN(obj_priv->pin_count,
4209                              "bo is already pinned with incorrect alignment:"
4210                              " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
4211                              " obj->map_and_fenceable=%d\n",
4212                              obj_priv->gtt_offset, alignment,
4213                              map_and_fenceable,
4214                              obj_priv->map_and_fenceable);
4215                         ret = i915_gem_object_unbind(obj);
4216                         if (ret)
4217                                 return ret;
4218                 }
4219         }
4220
4221         if (obj_priv->gtt_space == NULL) {
4222                 ret = i915_gem_object_bind_to_gtt(obj, alignment,
4223                                                   map_and_fenceable);
4224                 if (ret)
4225                         return ret;
4226         }
4227
4228         if (obj_priv->pin_count++ == 0) {
4229                 i915_gem_info_add_pin(dev_priv, obj_priv, map_and_fenceable);
4230                 if (!obj_priv->active)
4231                         list_move_tail(&obj_priv->mm_list,
4232                                        &dev_priv->mm.pinned_list);
4233         }
4234         BUG_ON(!obj_priv->pin_mappable && map_and_fenceable);
4235
4236         WARN_ON(i915_verify_lists(dev));
4237         return 0;
4238 }
4239
4240 void
4241 i915_gem_object_unpin(struct drm_gem_object *obj)
4242 {
4243         struct drm_device *dev = obj->dev;
4244         drm_i915_private_t *dev_priv = dev->dev_private;
4245         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4246
4247         WARN_ON(i915_verify_lists(dev));
4248         BUG_ON(obj_priv->pin_count == 0);
4249         BUG_ON(obj_priv->gtt_space == NULL);
4250
4251         if (--obj_priv->pin_count == 0) {
4252                 if (!obj_priv->active)
4253                         list_move_tail(&obj_priv->mm_list,
4254                                        &dev_priv->mm.inactive_list);
4255                 i915_gem_info_remove_pin(dev_priv, obj_priv);
4256         }
4257         WARN_ON(i915_verify_lists(dev));
4258 }
4259
4260 int
4261 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4262                    struct drm_file *file_priv)
4263 {
4264         struct drm_i915_gem_pin *args = data;
4265         struct drm_gem_object *obj;
4266         struct drm_i915_gem_object *obj_priv;
4267         int ret;
4268
4269         ret = i915_mutex_lock_interruptible(dev);
4270         if (ret)
4271                 return ret;
4272
4273         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4274         if (obj == NULL) {
4275                 ret = -ENOENT;
4276                 goto unlock;
4277         }
4278         obj_priv = to_intel_bo(obj);
4279
4280         if (obj_priv->madv != I915_MADV_WILLNEED) {
4281                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
4282                 ret = -EINVAL;
4283                 goto out;
4284         }
4285
4286         if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
4287                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
4288                           args->handle);
4289                 ret = -EINVAL;
4290                 goto out;
4291         }
4292
4293         obj_priv->user_pin_count++;
4294         obj_priv->pin_filp = file_priv;
4295         if (obj_priv->user_pin_count == 1) {
4296                 ret = i915_gem_object_pin(obj, args->alignment, true);
4297                 if (ret)
4298                         goto out;
4299         }
4300
4301         /* XXX - flush the CPU caches for pinned objects
4302          * as the X server doesn't manage domains yet
4303          */
4304         i915_gem_object_flush_cpu_write_domain(obj);
4305         args->offset = obj_priv->gtt_offset;
4306 out:
4307         drm_gem_object_unreference(obj);
4308 unlock:
4309         mutex_unlock(&dev->struct_mutex);
4310         return ret;
4311 }
4312
4313 int
4314 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4315                      struct drm_file *file_priv)
4316 {
4317         struct drm_i915_gem_pin *args = data;
4318         struct drm_gem_object *obj;
4319         struct drm_i915_gem_object *obj_priv;
4320         int ret;
4321
4322         ret = i915_mutex_lock_interruptible(dev);
4323         if (ret)
4324                 return ret;
4325
4326         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4327         if (obj == NULL) {
4328                 ret = -ENOENT;
4329                 goto unlock;
4330         }
4331         obj_priv = to_intel_bo(obj);
4332
4333         if (obj_priv->pin_filp != file_priv) {
4334                 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4335                           args->handle);
4336                 ret = -EINVAL;
4337                 goto out;
4338         }
4339         obj_priv->user_pin_count--;
4340         if (obj_priv->user_pin_count == 0) {
4341                 obj_priv->pin_filp = NULL;
4342                 i915_gem_object_unpin(obj);
4343         }
4344
4345 out:
4346         drm_gem_object_unreference(obj);
4347 unlock:
4348         mutex_unlock(&dev->struct_mutex);
4349         return ret;
4350 }
4351
4352 int
4353 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4354                     struct drm_file *file_priv)
4355 {
4356         struct drm_i915_gem_busy *args = data;
4357         struct drm_gem_object *obj;
4358         struct drm_i915_gem_object *obj_priv;
4359         int ret;
4360
4361         ret = i915_mutex_lock_interruptible(dev);
4362         if (ret)
4363                 return ret;
4364
4365         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4366         if (obj == NULL) {
4367                 ret = -ENOENT;
4368                 goto unlock;
4369         }
4370         obj_priv = to_intel_bo(obj);
4371
4372         /* Count all active objects as busy, even if they are currently not used
4373          * by the gpu. Users of this interface expect objects to eventually
4374          * become non-busy without any further actions, therefore emit any
4375          * necessary flushes here.
4376          */
4377         args->busy = obj_priv->active;
4378         if (args->busy) {
4379                 /* Unconditionally flush objects, even when the gpu still uses this
4380                  * object. Userspace calling this function indicates that it wants to
4381                  * use this buffer rather sooner than later, so issuing the required
4382                  * flush earlier is beneficial.
4383                  */
4384                 if (obj->write_domain & I915_GEM_GPU_DOMAINS)
4385                         i915_gem_flush_ring(dev, file_priv,
4386                                             obj_priv->ring,
4387                                             0, obj->write_domain);
4388
4389                 /* Update the active list for the hardware's current position.
4390                  * Otherwise this only updates on a delayed timer or when irqs
4391                  * are actually unmasked, and our working set ends up being
4392                  * larger than required.
4393                  */
4394                 i915_gem_retire_requests_ring(dev, obj_priv->ring);
4395
4396                 args->busy = obj_priv->active;
4397         }
4398
4399         drm_gem_object_unreference(obj);
4400 unlock:
4401         mutex_unlock(&dev->struct_mutex);
4402         return ret;
4403 }
4404
4405 int
4406 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4407                         struct drm_file *file_priv)
4408 {
4409     return i915_gem_ring_throttle(dev, file_priv);
4410 }
4411
4412 int
4413 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4414                        struct drm_file *file_priv)
4415 {
4416         struct drm_i915_gem_madvise *args = data;
4417         struct drm_gem_object *obj;
4418         struct drm_i915_gem_object *obj_priv;
4419         int ret;
4420
4421         switch (args->madv) {
4422         case I915_MADV_DONTNEED:
4423         case I915_MADV_WILLNEED:
4424             break;
4425         default:
4426             return -EINVAL;
4427         }
4428
4429         ret = i915_mutex_lock_interruptible(dev);
4430         if (ret)
4431                 return ret;
4432
4433         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4434         if (obj == NULL) {
4435                 ret = -ENOENT;
4436                 goto unlock;
4437         }
4438         obj_priv = to_intel_bo(obj);
4439
4440         if (obj_priv->pin_count) {
4441                 ret = -EINVAL;
4442                 goto out;
4443         }
4444
4445         if (obj_priv->madv != __I915_MADV_PURGED)
4446                 obj_priv->madv = args->madv;
4447
4448         /* if the object is no longer bound, discard its backing storage */
4449         if (i915_gem_object_is_purgeable(obj_priv) &&
4450             obj_priv->gtt_space == NULL)
4451                 i915_gem_object_truncate(obj);
4452
4453         args->retained = obj_priv->madv != __I915_MADV_PURGED;
4454
4455 out:
4456         drm_gem_object_unreference(obj);
4457 unlock:
4458         mutex_unlock(&dev->struct_mutex);
4459         return ret;
4460 }
4461
4462 struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
4463                                               size_t size)
4464 {
4465         struct drm_i915_private *dev_priv = dev->dev_private;
4466         struct drm_i915_gem_object *obj;
4467
4468         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
4469         if (obj == NULL)
4470                 return NULL;
4471
4472         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4473                 kfree(obj);
4474                 return NULL;
4475         }
4476
4477         i915_gem_info_add_obj(dev_priv, size);
4478
4479         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4480         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4481
4482         obj->agp_type = AGP_USER_MEMORY;
4483         obj->base.driver_private = NULL;
4484         obj->fence_reg = I915_FENCE_REG_NONE;
4485         INIT_LIST_HEAD(&obj->mm_list);
4486         INIT_LIST_HEAD(&obj->ring_list);
4487         INIT_LIST_HEAD(&obj->gpu_write_list);
4488         obj->madv = I915_MADV_WILLNEED;
4489         /* Avoid an unnecessary call to unbind on the first bind. */
4490         obj->map_and_fenceable = true;
4491
4492         return &obj->base;
4493 }
4494
4495 int i915_gem_init_object(struct drm_gem_object *obj)
4496 {
4497         BUG();
4498
4499         return 0;
4500 }
4501
4502 static void i915_gem_free_object_tail(struct drm_gem_object *obj)
4503 {
4504         struct drm_device *dev = obj->dev;
4505         drm_i915_private_t *dev_priv = dev->dev_private;
4506         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4507         int ret;
4508
4509         ret = i915_gem_object_unbind(obj);
4510         if (ret == -ERESTARTSYS) {
4511                 list_move(&obj_priv->mm_list,
4512                           &dev_priv->mm.deferred_free_list);
4513                 return;
4514         }
4515
4516         if (obj->map_list.map)
4517                 i915_gem_free_mmap_offset(obj);
4518
4519         drm_gem_object_release(obj);
4520         i915_gem_info_remove_obj(dev_priv, obj->size);
4521
4522         kfree(obj_priv->page_cpu_valid);
4523         kfree(obj_priv->bit_17);
4524         kfree(obj_priv);
4525 }
4526
4527 void i915_gem_free_object(struct drm_gem_object *obj)
4528 {
4529         struct drm_device *dev = obj->dev;
4530         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4531
4532         trace_i915_gem_object_destroy(obj);
4533
4534         while (obj_priv->pin_count > 0)
4535                 i915_gem_object_unpin(obj);
4536
4537         if (obj_priv->phys_obj)
4538                 i915_gem_detach_phys_object(dev, obj);
4539
4540         i915_gem_free_object_tail(obj);
4541 }
4542
4543 int
4544 i915_gem_idle(struct drm_device *dev)
4545 {
4546         drm_i915_private_t *dev_priv = dev->dev_private;
4547         int ret;
4548
4549         mutex_lock(&dev->struct_mutex);
4550
4551         if (dev_priv->mm.suspended) {
4552                 mutex_unlock(&dev->struct_mutex);
4553                 return 0;
4554         }
4555
4556         ret = i915_gpu_idle(dev);
4557         if (ret) {
4558                 mutex_unlock(&dev->struct_mutex);
4559                 return ret;
4560         }
4561
4562         /* Under UMS, be paranoid and evict. */
4563         if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
4564                 ret = i915_gem_evict_inactive(dev, false);
4565                 if (ret) {
4566                         mutex_unlock(&dev->struct_mutex);
4567                         return ret;
4568                 }
4569         }
4570
4571         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4572          * We need to replace this with a semaphore, or something.
4573          * And not confound mm.suspended!
4574          */
4575         dev_priv->mm.suspended = 1;
4576         del_timer_sync(&dev_priv->hangcheck_timer);
4577
4578         i915_kernel_lost_context(dev);
4579         i915_gem_cleanup_ringbuffer(dev);
4580
4581         mutex_unlock(&dev->struct_mutex);
4582
4583         /* Cancel the retire work handler, which should be idle now. */
4584         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4585
4586         return 0;
4587 }
4588
4589 /*
4590  * 965+ support PIPE_CONTROL commands, which provide finer grained control
4591  * over cache flushing.
4592  */
4593 static int
4594 i915_gem_init_pipe_control(struct drm_device *dev)
4595 {
4596         drm_i915_private_t *dev_priv = dev->dev_private;
4597         struct drm_gem_object *obj;
4598         struct drm_i915_gem_object *obj_priv;
4599         int ret;
4600
4601         obj = i915_gem_alloc_object(dev, 4096);
4602         if (obj == NULL) {
4603                 DRM_ERROR("Failed to allocate seqno page\n");
4604                 ret = -ENOMEM;
4605                 goto err;
4606         }
4607         obj_priv = to_intel_bo(obj);
4608         obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4609
4610         ret = i915_gem_object_pin(obj, 4096, true);
4611         if (ret)
4612                 goto err_unref;
4613
4614         dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4615         dev_priv->seqno_page =  kmap(obj_priv->pages[0]);
4616         if (dev_priv->seqno_page == NULL)
4617                 goto err_unpin;
4618
4619         dev_priv->seqno_obj = obj;
4620         memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4621
4622         return 0;
4623
4624 err_unpin:
4625         i915_gem_object_unpin(obj);
4626 err_unref:
4627         drm_gem_object_unreference(obj);
4628 err:
4629         return ret;
4630 }
4631
4632
4633 static void
4634 i915_gem_cleanup_pipe_control(struct drm_device *dev)
4635 {
4636         drm_i915_private_t *dev_priv = dev->dev_private;
4637         struct drm_gem_object *obj;
4638         struct drm_i915_gem_object *obj_priv;
4639
4640         obj = dev_priv->seqno_obj;
4641         obj_priv = to_intel_bo(obj);
4642         kunmap(obj_priv->pages[0]);
4643         i915_gem_object_unpin(obj);
4644         drm_gem_object_unreference(obj);
4645         dev_priv->seqno_obj = NULL;
4646
4647         dev_priv->seqno_page = NULL;
4648 }
4649
4650 int
4651 i915_gem_init_ringbuffer(struct drm_device *dev)
4652 {
4653         drm_i915_private_t *dev_priv = dev->dev_private;
4654         int ret;
4655
4656         if (HAS_PIPE_CONTROL(dev)) {
4657                 ret = i915_gem_init_pipe_control(dev);
4658                 if (ret)
4659                         return ret;
4660         }
4661
4662         ret = intel_init_render_ring_buffer(dev);
4663         if (ret)
4664                 goto cleanup_pipe_control;
4665
4666         if (HAS_BSD(dev)) {
4667                 ret = intel_init_bsd_ring_buffer(dev);
4668                 if (ret)
4669                         goto cleanup_render_ring;
4670         }
4671
4672         if (HAS_BLT(dev)) {
4673                 ret = intel_init_blt_ring_buffer(dev);
4674                 if (ret)
4675                         goto cleanup_bsd_ring;
4676         }
4677
4678         dev_priv->next_seqno = 1;
4679
4680         return 0;
4681
4682 cleanup_bsd_ring:
4683         intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
4684 cleanup_render_ring:
4685         intel_cleanup_ring_buffer(&dev_priv->render_ring);
4686 cleanup_pipe_control:
4687         if (HAS_PIPE_CONTROL(dev))
4688                 i915_gem_cleanup_pipe_control(dev);
4689         return ret;
4690 }
4691
4692 void
4693 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4694 {
4695         drm_i915_private_t *dev_priv = dev->dev_private;
4696
4697         intel_cleanup_ring_buffer(&dev_priv->render_ring);
4698         intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
4699         intel_cleanup_ring_buffer(&dev_priv->blt_ring);
4700         if (HAS_PIPE_CONTROL(dev))
4701                 i915_gem_cleanup_pipe_control(dev);
4702 }
4703
4704 int
4705 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4706                        struct drm_file *file_priv)
4707 {
4708         drm_i915_private_t *dev_priv = dev->dev_private;
4709         int ret;
4710
4711         if (drm_core_check_feature(dev, DRIVER_MODESET))
4712                 return 0;
4713
4714         if (atomic_read(&dev_priv->mm.wedged)) {
4715                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4716                 atomic_set(&dev_priv->mm.wedged, 0);
4717         }
4718
4719         mutex_lock(&dev->struct_mutex);
4720         dev_priv->mm.suspended = 0;
4721
4722         ret = i915_gem_init_ringbuffer(dev);
4723         if (ret != 0) {
4724                 mutex_unlock(&dev->struct_mutex);
4725                 return ret;
4726         }
4727
4728         BUG_ON(!list_empty(&dev_priv->mm.active_list));
4729         BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
4730         BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
4731         BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
4732         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4733         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4734         BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
4735         BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
4736         BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
4737         mutex_unlock(&dev->struct_mutex);
4738
4739         ret = drm_irq_install(dev);
4740         if (ret)
4741                 goto cleanup_ringbuffer;
4742
4743         return 0;
4744
4745 cleanup_ringbuffer:
4746         mutex_lock(&dev->struct_mutex);
4747         i915_gem_cleanup_ringbuffer(dev);
4748         dev_priv->mm.suspended = 1;
4749         mutex_unlock(&dev->struct_mutex);
4750
4751         return ret;
4752 }
4753
4754 int
4755 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4756                        struct drm_file *file_priv)
4757 {
4758         if (drm_core_check_feature(dev, DRIVER_MODESET))
4759                 return 0;
4760
4761         drm_irq_uninstall(dev);
4762         return i915_gem_idle(dev);
4763 }
4764
4765 void
4766 i915_gem_lastclose(struct drm_device *dev)
4767 {
4768         int ret;
4769
4770         if (drm_core_check_feature(dev, DRIVER_MODESET))
4771                 return;
4772
4773         ret = i915_gem_idle(dev);
4774         if (ret)
4775                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4776 }
4777
4778 static void
4779 init_ring_lists(struct intel_ring_buffer *ring)
4780 {
4781         INIT_LIST_HEAD(&ring->active_list);
4782         INIT_LIST_HEAD(&ring->request_list);
4783         INIT_LIST_HEAD(&ring->gpu_write_list);
4784 }
4785
4786 void
4787 i915_gem_load(struct drm_device *dev)
4788 {
4789         int i;
4790         drm_i915_private_t *dev_priv = dev->dev_private;
4791
4792         INIT_LIST_HEAD(&dev_priv->mm.active_list);
4793         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4794         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4795         INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
4796         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4797         INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
4798         init_ring_lists(&dev_priv->render_ring);
4799         init_ring_lists(&dev_priv->bsd_ring);
4800         init_ring_lists(&dev_priv->blt_ring);
4801         for (i = 0; i < 16; i++)
4802                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4803         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4804                           i915_gem_retire_work_handler);
4805         init_completion(&dev_priv->error_completion);
4806
4807         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4808         if (IS_GEN3(dev)) {
4809                 u32 tmp = I915_READ(MI_ARB_STATE);
4810                 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
4811                         /* arb state is a masked write, so set bit + bit in mask */
4812                         tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
4813                         I915_WRITE(MI_ARB_STATE, tmp);
4814                 }
4815         }
4816
4817         /* Old X drivers will take 0-2 for front, back, depth buffers */
4818         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4819                 dev_priv->fence_reg_start = 3;
4820
4821         if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4822                 dev_priv->num_fence_regs = 16;
4823         else
4824                 dev_priv->num_fence_regs = 8;
4825
4826         /* Initialize fence registers to zero */
4827         switch (INTEL_INFO(dev)->gen) {
4828         case 6:
4829                 for (i = 0; i < 16; i++)
4830                         I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
4831                 break;
4832         case 5:
4833         case 4:
4834                 for (i = 0; i < 16; i++)
4835                         I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
4836                 break;
4837         case 3:
4838                 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4839                         for (i = 0; i < 8; i++)
4840                                 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
4841         case 2:
4842                 for (i = 0; i < 8; i++)
4843                         I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4844                 break;
4845         }
4846         i915_gem_detect_bit_6_swizzle(dev);
4847         init_waitqueue_head(&dev_priv->pending_flip_queue);
4848
4849         dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
4850         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4851         register_shrinker(&dev_priv->mm.inactive_shrinker);
4852 }
4853
4854 /*
4855  * Create a physically contiguous memory object for this object
4856  * e.g. for cursor + overlay regs
4857  */
4858 static int i915_gem_init_phys_object(struct drm_device *dev,
4859                                      int id, int size, int align)
4860 {
4861         drm_i915_private_t *dev_priv = dev->dev_private;
4862         struct drm_i915_gem_phys_object *phys_obj;
4863         int ret;
4864
4865         if (dev_priv->mm.phys_objs[id - 1] || !size)
4866                 return 0;
4867
4868         phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4869         if (!phys_obj)
4870                 return -ENOMEM;
4871
4872         phys_obj->id = id;
4873
4874         phys_obj->handle = drm_pci_alloc(dev, size, align);
4875         if (!phys_obj->handle) {
4876                 ret = -ENOMEM;
4877                 goto kfree_obj;
4878         }
4879 #ifdef CONFIG_X86
4880         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4881 #endif
4882
4883         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4884
4885         return 0;
4886 kfree_obj:
4887         kfree(phys_obj);
4888         return ret;
4889 }
4890
4891 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4892 {
4893         drm_i915_private_t *dev_priv = dev->dev_private;
4894         struct drm_i915_gem_phys_object *phys_obj;
4895
4896         if (!dev_priv->mm.phys_objs[id - 1])
4897                 return;
4898
4899         phys_obj = dev_priv->mm.phys_objs[id - 1];
4900         if (phys_obj->cur_obj) {
4901                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4902         }
4903
4904 #ifdef CONFIG_X86
4905         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4906 #endif
4907         drm_pci_free(dev, phys_obj->handle);
4908         kfree(phys_obj);
4909         dev_priv->mm.phys_objs[id - 1] = NULL;
4910 }
4911
4912 void i915_gem_free_all_phys_object(struct drm_device *dev)
4913 {
4914         int i;
4915
4916         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4917                 i915_gem_free_phys_object(dev, i);
4918 }
4919
4920 void i915_gem_detach_phys_object(struct drm_device *dev,
4921                                  struct drm_gem_object *obj)
4922 {
4923         struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
4924         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4925         char *vaddr;
4926         int i;
4927         int page_count;
4928
4929         if (!obj_priv->phys_obj)
4930                 return;
4931         vaddr = obj_priv->phys_obj->handle->vaddr;
4932
4933         page_count = obj->size / PAGE_SIZE;
4934
4935         for (i = 0; i < page_count; i++) {
4936                 struct page *page = read_cache_page_gfp(mapping, i,
4937                                                         GFP_HIGHUSER | __GFP_RECLAIMABLE);
4938                 if (!IS_ERR(page)) {
4939                         char *dst = kmap_atomic(page);
4940                         memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4941                         kunmap_atomic(dst);
4942
4943                         drm_clflush_pages(&page, 1);
4944
4945                         set_page_dirty(page);
4946                         mark_page_accessed(page);
4947                         page_cache_release(page);
4948                 }
4949         }
4950         drm_agp_chipset_flush(dev);
4951
4952         obj_priv->phys_obj->cur_obj = NULL;
4953         obj_priv->phys_obj = NULL;
4954 }
4955
4956 int
4957 i915_gem_attach_phys_object(struct drm_device *dev,
4958                             struct drm_gem_object *obj,
4959                             int id,
4960                             int align)
4961 {
4962         struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
4963         drm_i915_private_t *dev_priv = dev->dev_private;
4964         struct drm_i915_gem_object *obj_priv;
4965         int ret = 0;
4966         int page_count;
4967         int i;
4968
4969         if (id > I915_MAX_PHYS_OBJECT)
4970                 return -EINVAL;
4971
4972         obj_priv = to_intel_bo(obj);
4973
4974         if (obj_priv->phys_obj) {
4975                 if (obj_priv->phys_obj->id == id)
4976                         return 0;
4977                 i915_gem_detach_phys_object(dev, obj);
4978         }
4979
4980         /* create a new object */
4981         if (!dev_priv->mm.phys_objs[id - 1]) {
4982                 ret = i915_gem_init_phys_object(dev, id,
4983                                                 obj->size, align);
4984                 if (ret) {
4985                         DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
4986                         return ret;
4987                 }
4988         }
4989
4990         /* bind to the object */
4991         obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4992         obj_priv->phys_obj->cur_obj = obj;
4993
4994         page_count = obj->size / PAGE_SIZE;
4995
4996         for (i = 0; i < page_count; i++) {
4997                 struct page *page;
4998                 char *dst, *src;
4999
5000                 page = read_cache_page_gfp(mapping, i,
5001                                            GFP_HIGHUSER | __GFP_RECLAIMABLE);
5002                 if (IS_ERR(page))
5003                         return PTR_ERR(page);
5004
5005                 src = kmap_atomic(page);
5006                 dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
5007                 memcpy(dst, src, PAGE_SIZE);
5008                 kunmap_atomic(src);
5009
5010                 mark_page_accessed(page);
5011                 page_cache_release(page);
5012         }
5013
5014         return 0;
5015 }
5016
5017 static int
5018 i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
5019                      struct drm_i915_gem_pwrite *args,
5020                      struct drm_file *file_priv)
5021 {
5022         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
5023         void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
5024         char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
5025
5026         DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
5027
5028         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
5029                 unsigned long unwritten;
5030
5031                 /* The physical object once assigned is fixed for the lifetime
5032                  * of the obj, so we can safely drop the lock and continue
5033                  * to access vaddr.
5034                  */
5035                 mutex_unlock(&dev->struct_mutex);
5036                 unwritten = copy_from_user(vaddr, user_data, args->size);
5037                 mutex_lock(&dev->struct_mutex);
5038                 if (unwritten)
5039                         return -EFAULT;
5040         }
5041
5042         drm_agp_chipset_flush(dev);
5043         return 0;
5044 }
5045
5046 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5047 {
5048         struct drm_i915_file_private *file_priv = file->driver_priv;
5049
5050         /* Clean up our request list when the client is going away, so that
5051          * later retire_requests won't dereference our soon-to-be-gone
5052          * file_priv.
5053          */
5054         spin_lock(&file_priv->mm.lock);
5055         while (!list_empty(&file_priv->mm.request_list)) {
5056                 struct drm_i915_gem_request *request;
5057
5058                 request = list_first_entry(&file_priv->mm.request_list,
5059                                            struct drm_i915_gem_request,
5060                                            client_list);
5061                 list_del(&request->client_list);
5062                 request->file_priv = NULL;
5063         }
5064         spin_unlock(&file_priv->mm.lock);
5065 }
5066
5067 static int
5068 i915_gpu_is_active(struct drm_device *dev)
5069 {
5070         drm_i915_private_t *dev_priv = dev->dev_private;
5071         int lists_empty;
5072
5073         lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
5074                       list_empty(&dev_priv->mm.active_list);
5075
5076         return !lists_empty;
5077 }
5078
5079 static int
5080 i915_gem_inactive_shrink(struct shrinker *shrinker,
5081                          int nr_to_scan,
5082                          gfp_t gfp_mask)
5083 {
5084         struct drm_i915_private *dev_priv =
5085                 container_of(shrinker,
5086                              struct drm_i915_private,
5087                              mm.inactive_shrinker);
5088         struct drm_device *dev = dev_priv->dev;
5089         struct drm_i915_gem_object *obj, *next;
5090         int cnt;
5091
5092         if (!mutex_trylock(&dev->struct_mutex))
5093                 return 0;
5094
5095         /* "fast-path" to count number of available objects */
5096         if (nr_to_scan == 0) {
5097                 cnt = 0;
5098                 list_for_each_entry(obj,
5099                                     &dev_priv->mm.inactive_list,
5100                                     mm_list)
5101                         cnt++;
5102                 mutex_unlock(&dev->struct_mutex);
5103                 return cnt / 100 * sysctl_vfs_cache_pressure;
5104         }
5105
5106 rescan:
5107         /* first scan for clean buffers */
5108         i915_gem_retire_requests(dev);
5109
5110         list_for_each_entry_safe(obj, next,
5111                                  &dev_priv->mm.inactive_list,
5112                                  mm_list) {
5113                 if (i915_gem_object_is_purgeable(obj)) {
5114                         i915_gem_object_unbind(&obj->base);
5115                         if (--nr_to_scan == 0)
5116                                 break;
5117                 }
5118         }
5119
5120         /* second pass, evict/count anything still on the inactive list */
5121         cnt = 0;
5122         list_for_each_entry_safe(obj, next,
5123                                  &dev_priv->mm.inactive_list,
5124                                  mm_list) {
5125                 if (nr_to_scan) {
5126                         i915_gem_object_unbind(&obj->base);
5127                         nr_to_scan--;
5128                 } else
5129                         cnt++;
5130         }
5131
5132         if (nr_to_scan && i915_gpu_is_active(dev)) {
5133                 /*
5134                  * We are desperate for pages, so as a last resort, wait
5135                  * for the GPU to finish and discard whatever we can.
5136                  * This has a dramatic impact to reduce the number of
5137                  * OOM-killer events whilst running the GPU aggressively.
5138                  */
5139                 if (i915_gpu_idle(dev) == 0)
5140                         goto rescan;
5141         }
5142         mutex_unlock(&dev->struct_mutex);
5143         return cnt / 100 * sysctl_vfs_cache_pressure;
5144 }