drm/i915: Attempt to prefault user pages for pread/pwrite
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include "drmP.h"
29 #include "drm.h"
30 #include "i915_drm.h"
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/slab.h>
35 #include <linux/swap.h>
36 #include <linux/pci.h>
37 #include <linux/intel-gtt.h>
38
39 static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
40
41 static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
42                                                   bool pipelined);
43 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
44 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
45 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
46                                              int write);
47 static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
48                                                      uint64_t offset,
49                                                      uint64_t size);
50 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
51 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
52                                           bool interruptible);
53 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
54                                            unsigned alignment);
55 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
56 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
57                                 struct drm_i915_gem_pwrite *args,
58                                 struct drm_file *file_priv);
59 static void i915_gem_free_object_tail(struct drm_gem_object *obj);
60
61 static int
62 i915_gem_object_get_pages(struct drm_gem_object *obj,
63                           gfp_t gfpmask);
64
65 static void
66 i915_gem_object_put_pages(struct drm_gem_object *obj);
67
68 static LIST_HEAD(shrink_list);
69 static DEFINE_SPINLOCK(shrink_list_lock);
70
71 /* some bookkeeping */
72 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
73                                   size_t size)
74 {
75         dev_priv->mm.object_count++;
76         dev_priv->mm.object_memory += size;
77 }
78
79 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
80                                      size_t size)
81 {
82         dev_priv->mm.object_count--;
83         dev_priv->mm.object_memory -= size;
84 }
85
86 static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
87                                   size_t size)
88 {
89         dev_priv->mm.gtt_count++;
90         dev_priv->mm.gtt_memory += size;
91 }
92
93 static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
94                                      size_t size)
95 {
96         dev_priv->mm.gtt_count--;
97         dev_priv->mm.gtt_memory -= size;
98 }
99
100 static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
101                                   size_t size)
102 {
103         dev_priv->mm.pin_count++;
104         dev_priv->mm.pin_memory += size;
105 }
106
107 static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
108                                      size_t size)
109 {
110         dev_priv->mm.pin_count--;
111         dev_priv->mm.pin_memory -= size;
112 }
113
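/* Check whether the GPU is wedged.  If a reset is pending, wait for it to
 * complete; returns 0 if the GPU is (or becomes) usable again, -EIO if it
 * remains hung, or an error if the wait is interrupted by a signal.
 */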
114 int
115 i915_gem_check_is_wedged(struct drm_device *dev)
116 {
117         struct drm_i915_private *dev_priv = dev->dev_private;
118         struct completion *x = &dev_priv->error_completion;
119         unsigned long flags;
120         int ret;
121
122         if (!atomic_read(&dev_priv->mm.wedged))
123                 return 0;
124
125         ret = wait_for_completion_interruptible(x);
126         if (ret)
127                 return ret;
128
129         /* Success, we reset the GPU! */
130         if (!atomic_read(&dev_priv->mm.wedged))
131                 return 0;
132
133         /* GPU is hung, bump the completion count to account for
134          * the token we just consumed so that we never hit zero and
135          * end up waiting upon a subsequent completion event that
136          * will never happen.
137          */
138         spin_lock_irqsave(&x->wait.lock, flags);
139         x->done++;
140         spin_unlock_irqrestore(&x->wait.lock, flags);
141         return -EIO;
142 }
143
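/* Acquire struct_mutex for a GEM operation.  Fails early if the GPU is
 * wedged, if the lock acquisition is interrupted by a signal, or with
 * -EAGAIN if a GPU hang is detected after the lock has been taken.
 */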
144 static int i915_mutex_lock_interruptible(struct drm_device *dev)
145 {
146         struct drm_i915_private *dev_priv = dev->dev_private;
147         int ret;
148
149         ret = i915_gem_check_is_wedged(dev);
150         if (ret)
151                 return ret;
152
153         ret = mutex_lock_interruptible(&dev->struct_mutex);
154         if (ret)
155                 return ret;
156
157         if (atomic_read(&dev_priv->mm.wedged)) {
158                 mutex_unlock(&dev->struct_mutex);
159                 return -EAGAIN;
160         }
161
162         WARN_ON(i915_verify_lists(dev));
163         return 0;
164 }
165
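/* An object is "inactive" when it is bound into the GTT but is neither
 * pinned nor in use by the GPU.
 */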
166 static inline bool
167 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
168 {
169         return obj_priv->gtt_space &&
170                 !obj_priv->active &&
171                 obj_priv->pin_count == 0;
172 }
173
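/* Set up the GTT range manager for the [start, end) region of the aperture
 * that GEM may use; both boundaries must be page aligned.
 */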
174 int i915_gem_do_init(struct drm_device *dev,
175                      unsigned long start,
176                      unsigned long end)
177 {
178         drm_i915_private_t *dev_priv = dev->dev_private;
179
180         if (start >= end ||
181             (start & (PAGE_SIZE - 1)) != 0 ||
182             (end & (PAGE_SIZE - 1)) != 0) {
183                 return -EINVAL;
184         }
185
186         drm_mm_init(&dev_priv->mm.gtt_space, start,
187                     end - start);
188
189         dev_priv->mm.gtt_total = end - start;
190
191         return 0;
192 }
193
194 int
195 i915_gem_init_ioctl(struct drm_device *dev, void *data,
196                     struct drm_file *file_priv)
197 {
198         struct drm_i915_gem_init *args = data;
199         int ret;
200
201         mutex_lock(&dev->struct_mutex);
202         ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
203         mutex_unlock(&dev->struct_mutex);
204
205         return ret;
206 }
207
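/**
 * Reports the total size of the GEM-managed aperture and the portion of it
 * that is not currently consumed by pinned objects.
 */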
208 int
209 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
210                             struct drm_file *file_priv)
211 {
212         struct drm_i915_private *dev_priv = dev->dev_private;
213         struct drm_i915_gem_get_aperture *args = data;
214
215         if (!(dev->driver->driver_features & DRIVER_GEM))
216                 return -ENODEV;
217
218         mutex_lock(&dev->struct_mutex);
219         args->aper_size = dev_priv->mm.gtt_total;
220         args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
221         mutex_unlock(&dev->struct_mutex);
222
223         return 0;
224 }
225
226
227 /**
228  * Creates a new mm object and returns a handle to it.
229  */
230 int
231 i915_gem_create_ioctl(struct drm_device *dev, void *data,
232                       struct drm_file *file_priv)
233 {
234         struct drm_i915_gem_create *args = data;
235         struct drm_gem_object *obj;
236         int ret;
237         u32 handle;
238
239         args->size = roundup(args->size, PAGE_SIZE);
240
241         /* Allocate the new object */
242         obj = i915_gem_alloc_object(dev, args->size);
243         if (obj == NULL)
244                 return -ENOMEM;
245
246         ret = drm_gem_handle_create(file_priv, obj, &handle);
247         if (ret) {
248                 drm_gem_object_release(obj);
249                 i915_gem_info_remove_obj(dev->dev_private, obj->size);
250                 kfree(obj);
251                 return ret;
252         }
253
254         /* drop reference from allocate - handle holds it now */
255         drm_gem_object_unreference(obj);
256         trace_i915_gem_object_create(obj);
257
258         args->handle = handle;
259         return 0;
260 }
261
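/* Copy from an object backing page to the user buffer under an atomic
 * kmap.  Returns -EFAULT if the copy would fault, so the caller can fall
 * back to the slow path.
 */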
262 static inline int
263 fast_shmem_read(struct page **pages,
264                 loff_t page_base, int page_offset,
265                 char __user *data,
266                 int length)
267 {
268         int unwritten;
269         char *vaddr;
270
271         vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
272         unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
273         kunmap_atomic(vaddr, KM_USER0);
274
275         return unwritten ? -EFAULT : 0;
276 }
277
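/* Tiled objects whose swizzling depends on physical address bit 17 need
 * extra help from the CPU copy paths, since userspace cannot account for
 * that bit itself (it already handles the A9/A10 swizzling).
 */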
278 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
279 {
280         drm_i915_private_t *dev_priv = obj->dev->dev_private;
281         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
282
283         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
284                 obj_priv->tiling_mode != I915_TILING_NONE;
285 }
286
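/* Copy length bytes between two (already pinned) pages using regular,
 * sleepable kmap.
 */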
287 static inline void
288 slow_shmem_copy(struct page *dst_page,
289                 int dst_offset,
290                 struct page *src_page,
291                 int src_offset,
292                 int length)
293 {
294         char *dst_vaddr, *src_vaddr;
295
296         dst_vaddr = kmap(dst_page);
297         src_vaddr = kmap(src_page);
298
299         memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
300
301         kunmap(src_page);
302         kunmap(dst_page);
303 }
304
305 static inline void
306 slow_shmem_bit17_copy(struct page *gpu_page,
307                       int gpu_offset,
308                       struct page *cpu_page,
309                       int cpu_offset,
310                       int length,
311                       int is_read)
312 {
313         char *gpu_vaddr, *cpu_vaddr;
314
315         /* Use the unswizzled path if this page isn't affected. */
316         if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
317                 if (is_read)
318                         return slow_shmem_copy(cpu_page, cpu_offset,
319                                                gpu_page, gpu_offset, length);
320                 else
321                         return slow_shmem_copy(gpu_page, gpu_offset,
322                                                cpu_page, cpu_offset, length);
323         }
324
325         gpu_vaddr = kmap(gpu_page);
326         cpu_vaddr = kmap(cpu_page);
327
328         /* Copy the data, XORing A6 with A17 (1). The user already knows he's
329          * XORing with the other bits (A9 for Y, A9 and A10 for X)
330          */
331         while (length > 0) {
332                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
333                 int this_length = min(cacheline_end - gpu_offset, length);
334                 int swizzled_gpu_offset = gpu_offset ^ 64;
335
336                 if (is_read) {
337                         memcpy(cpu_vaddr + cpu_offset,
338                                gpu_vaddr + swizzled_gpu_offset,
339                                this_length);
340                 } else {
341                         memcpy(gpu_vaddr + swizzled_gpu_offset,
342                                cpu_vaddr + cpu_offset,
343                                this_length);
344                 }
345                 cpu_offset += this_length;
346                 gpu_offset += this_length;
347                 length -= this_length;
348         }
349
350         kunmap(cpu_page);
351         kunmap(gpu_page);
352 }
353
354 /**
355  * This is the fast shmem pread path, which attempts to copy_to_user directly
356  * from the backing pages of the object into the user's address space.  On a
357  * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
358  */
359 static int
360 i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
361                           struct drm_i915_gem_pread *args,
362                           struct drm_file *file_priv)
363 {
364         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
365         ssize_t remain;
366         loff_t offset, page_base;
367         char __user *user_data;
368         int page_offset, page_length;
369         int ret;
370
371         user_data = (char __user *) (uintptr_t) args->data_ptr;
372         remain = args->size;
373
374         ret = i915_mutex_lock_interruptible(dev);
375         if (ret)
376                 return ret;
377
378         ret = i915_gem_object_get_pages(obj, 0);
379         if (ret != 0)
380                 goto fail_unlock;
381
382         ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
383                                                         args->size);
384         if (ret != 0)
385                 goto fail_put_pages;
386
387         obj_priv = to_intel_bo(obj);
388         offset = args->offset;
389
390         while (remain > 0) {
391                 /* Operation in this page
392                  *
393                  * page_base = page offset within aperture
394                  * page_offset = offset within page
395                  * page_length = bytes to copy for this page
396                  */
397                 page_base = (offset & ~(PAGE_SIZE-1));
398                 page_offset = offset & (PAGE_SIZE-1);
399                 page_length = remain;
400                 if ((page_offset + remain) > PAGE_SIZE)
401                         page_length = PAGE_SIZE - page_offset;
402
403                 ret = fast_shmem_read(obj_priv->pages,
404                                       page_base, page_offset,
405                                       user_data, page_length);
406                 if (ret)
407                         goto fail_put_pages;
408
409                 remain -= page_length;
410                 user_data += page_length;
411                 offset += page_length;
412         }
413
414 fail_put_pages:
415         i915_gem_object_put_pages(obj);
416 fail_unlock:
417         mutex_unlock(&dev->struct_mutex);
418
419         return ret;
420 }
421
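/* Grab the object's backing pages without retrying hard allocations; on
 * -ENOMEM, evict something from the GTT to free memory and try again.
 */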
422 static int
423 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
424 {
425         int ret;
426
427         ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
428
429         /* If we have insufficient memory to map in the pages, attempt
430          * to make some space by throwing out some old buffers.
431          */
432         if (ret == -ENOMEM) {
433                 struct drm_device *dev = obj->dev;
434
435                 ret = i915_gem_evict_something(dev, obj->size,
436                                                i915_gem_get_gtt_alignment(obj));
437                 if (ret)
438                         return ret;
439
440                 ret = i915_gem_object_get_pages(obj, 0);
441         }
442
443         return ret;
444 }
445
446 /**
447  * This is the fallback shmem pread path, which uses get_user_pages to pin
448  * the user's destination pages up front, outside of the struct_mutex, so we
449  * can copy out of the object's backing pages via kmap while holding the
450  * struct mutex and not take page faults.
451  */
452 static int
453 i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
454                           struct drm_i915_gem_pread *args,
455                           struct drm_file *file_priv)
456 {
457         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
458         struct mm_struct *mm = current->mm;
459         struct page **user_pages;
460         ssize_t remain;
461         loff_t offset, pinned_pages, i;
462         loff_t first_data_page, last_data_page, num_pages;
463         int shmem_page_index, shmem_page_offset;
464         int data_page_index,  data_page_offset;
465         int page_length;
466         int ret;
467         uint64_t data_ptr = args->data_ptr;
468         int do_bit17_swizzling;
469
470         remain = args->size;
471
472         /* Pin the user pages containing the data.  We can't fault while
473          * holding the struct mutex, yet we want to hold it while
474          * dereferencing the user data.
475          */
476         first_data_page = data_ptr / PAGE_SIZE;
477         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
478         num_pages = last_data_page - first_data_page + 1;
479
480         user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
481         if (user_pages == NULL)
482                 return -ENOMEM;
483
484         down_read(&mm->mmap_sem);
485         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
486                                       num_pages, 1, 0, user_pages, NULL);
487         up_read(&mm->mmap_sem);
488         if (pinned_pages < num_pages) {
489                 ret = -EFAULT;
490                 goto fail_put_user_pages;
491         }
492
493         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
494
495         ret = i915_mutex_lock_interruptible(dev);
496         if (ret)
497                 goto fail_put_user_pages;
498
499         ret = i915_gem_object_get_pages_or_evict(obj);
500         if (ret)
501                 goto fail_unlock;
502
503         ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
504                                                         args->size);
505         if (ret != 0)
506                 goto fail_put_pages;
507
508         obj_priv = to_intel_bo(obj);
509         offset = args->offset;
510
511         while (remain > 0) {
512                 /* Operation in this page
513                  *
514                  * shmem_page_index = page number within shmem file
515                  * shmem_page_offset = offset within page in shmem file
516                  * data_page_index = page number in get_user_pages return
517                  * data_page_offset = offset within the data_page_index page.
518                  * page_length = bytes to copy for this page
519                  */
520                 shmem_page_index = offset / PAGE_SIZE;
521                 shmem_page_offset = offset & ~PAGE_MASK;
522                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
523                 data_page_offset = data_ptr & ~PAGE_MASK;
524
525                 page_length = remain;
526                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
527                         page_length = PAGE_SIZE - shmem_page_offset;
528                 if ((data_page_offset + page_length) > PAGE_SIZE)
529                         page_length = PAGE_SIZE - data_page_offset;
530
531                 if (do_bit17_swizzling) {
532                         slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
533                                               shmem_page_offset,
534                                               user_pages[data_page_index],
535                                               data_page_offset,
536                                               page_length,
537                                               1);
538                 } else {
539                         slow_shmem_copy(user_pages[data_page_index],
540                                         data_page_offset,
541                                         obj_priv->pages[shmem_page_index],
542                                         shmem_page_offset,
543                                         page_length);
544                 }
545
546                 remain -= page_length;
547                 data_ptr += page_length;
548                 offset += page_length;
549         }
550
551 fail_put_pages:
552         i915_gem_object_put_pages(obj);
553 fail_unlock:
554         mutex_unlock(&dev->struct_mutex);
555 fail_put_user_pages:
556         for (i = 0; i < pinned_pages; i++) {
557                 SetPageDirty(user_pages[i]);
558                 page_cache_release(user_pages[i]);
559         }
560         drm_free_large(user_pages);
561
562         return ret;
563 }
564
565 /**
566  * Reads data from the object referenced by handle.
567  *
568  * On error, the contents of *data are undefined.
569  */
570 int
571 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
572                      struct drm_file *file_priv)
573 {
574         struct drm_i915_gem_pread *args = data;
575         struct drm_gem_object *obj;
576         struct drm_i915_gem_object *obj_priv;
577         int ret = 0;
578
579         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
580         if (obj == NULL)
581                 return -ENOENT;
582         obj_priv = to_intel_bo(obj);
583
584         /* Bounds check source.  */
585         if (args->offset > obj->size || args->size > obj->size - args->offset) {
586                 ret = -EINVAL;
587                 goto out;
588         }
589
590         if (args->size == 0)
591                 goto out;
592
593         if (!access_ok(VERIFY_WRITE,
594                        (char __user *)(uintptr_t)args->data_ptr,
595                        args->size)) {
596                 ret = -EFAULT;
597                 goto out;
598         }
599
600         ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
601                                        args->size);
602         if (ret) {
603                 ret = -EFAULT;
604                 goto out;
605         }
606
607         if (i915_gem_object_needs_bit17_swizzle(obj)) {
608                 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
609         } else {
610                 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
611                 if (ret != 0)
612                         ret = i915_gem_shmem_pread_slow(dev, obj, args,
613                                                         file_priv);
614         }
615
616 out:
617         drm_gem_object_unreference_unlocked(obj);
618         return ret;
619 }
620
621 /* This is the fast write path which cannot handle
622  * page faults in the source data
623  */
624
625 static inline int
626 fast_user_write(struct io_mapping *mapping,
627                 loff_t page_base, int page_offset,
628                 char __user *user_data,
629                 int length)
630 {
631         char *vaddr_atomic;
632         unsigned long unwritten;
633
634         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
635         unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
636                                                       user_data, length);
637         io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
638         if (unwritten)
639                 return -EFAULT;
640         return 0;
641 }
642
643 /* This is the slow GTT write path, which can sleep while
644  * handling page faults in the source data
645  */
646
647 static inline void
648 slow_kernel_write(struct io_mapping *mapping,
649                   loff_t gtt_base, int gtt_offset,
650                   struct page *user_page, int user_offset,
651                   int length)
652 {
653         char __iomem *dst_vaddr;
654         char *src_vaddr;
655
656         dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
657         src_vaddr = kmap(user_page);
658
659         memcpy_toio(dst_vaddr + gtt_offset,
660                     src_vaddr + user_offset,
661                     length);
662
663         kunmap(user_page);
664         io_mapping_unmap(dst_vaddr);
665 }
666
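/* Copy from the user buffer into an object backing page under an atomic
 * kmap.  Returns -EFAULT if the copy would fault, so the caller can fall
 * back to the slow path.
 */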
667 static inline int
668 fast_shmem_write(struct page **pages,
669                  loff_t page_base, int page_offset,
670                  char __user *data,
671                  int length)
672 {
673         int unwritten;
674         char *vaddr;
675
676         vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
677         unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
678         kunmap_atomic(vaddr, KM_USER0);
679
680         return unwritten ? -EFAULT : 0;
681 }
682
683 /**
684  * This is the fast pwrite path, where we copy the data directly from the
685  * user into the GTT, uncached.
686  */
687 static int
688 i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
689                          struct drm_i915_gem_pwrite *args,
690                          struct drm_file *file_priv)
691 {
692         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
693         drm_i915_private_t *dev_priv = dev->dev_private;
694         ssize_t remain;
695         loff_t offset, page_base;
696         char __user *user_data;
697         int page_offset, page_length;
698         int ret;
699
700         user_data = (char __user *) (uintptr_t) args->data_ptr;
701         remain = args->size;
702
703         ret = i915_mutex_lock_interruptible(dev);
704         if (ret)
705                 return ret;
706
707         ret = i915_gem_object_pin(obj, 0);
708         if (ret) {
709                 mutex_unlock(&dev->struct_mutex);
710                 return ret;
711         }
712         ret = i915_gem_object_set_to_gtt_domain(obj, 1);
713         if (ret)
714                 goto fail;
715
716         obj_priv = to_intel_bo(obj);
717         offset = obj_priv->gtt_offset + args->offset;
718
719         while (remain > 0) {
720                 /* Operation in this page
721                  *
722                  * page_base = page offset within aperture
723                  * page_offset = offset within page
724                  * page_length = bytes to copy for this page
725                  */
726                 page_base = (offset & ~(PAGE_SIZE-1));
727                 page_offset = offset & (PAGE_SIZE-1);
728                 page_length = remain;
729                 if ((page_offset + remain) > PAGE_SIZE)
730                         page_length = PAGE_SIZE - page_offset;
731
732                 ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
733                                        page_offset, user_data, page_length);
734
735                 /* If we get a fault while copying data, then (presumably) our
736                  * source page isn't available.  Return the error and we'll
737                  * retry in the slow path.
738                  */
739                 if (ret)
740                         goto fail;
741
742                 remain -= page_length;
743                 user_data += page_length;
744                 offset += page_length;
745         }
746
747 fail:
748         i915_gem_object_unpin(obj);
749         mutex_unlock(&dev->struct_mutex);
750
751         return ret;
752 }
753
754 /**
755  * This is the fallback GTT pwrite path, which uses get_user_pages to pin
756  * the memory and copies it into the GTT through a non-atomic mapping.
757  *
758  * This code resulted in x11perf -rgb10text consuming about 10% more CPU
759  * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
760  */
761 static int
762 i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
763                          struct drm_i915_gem_pwrite *args,
764                          struct drm_file *file_priv)
765 {
766         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
767         drm_i915_private_t *dev_priv = dev->dev_private;
768         ssize_t remain;
769         loff_t gtt_page_base, offset;
770         loff_t first_data_page, last_data_page, num_pages;
771         loff_t pinned_pages, i;
772         struct page **user_pages;
773         struct mm_struct *mm = current->mm;
774         int gtt_page_offset, data_page_offset, data_page_index, page_length;
775         int ret;
776         uint64_t data_ptr = args->data_ptr;
777
778         remain = args->size;
779
780         /* Pin the user pages containing the data.  We can't fault while
781          * holding the struct mutex, and all of the pwrite implementations
782          * want to hold it while dereferencing the user data.
783          */
784         first_data_page = data_ptr / PAGE_SIZE;
785         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
786         num_pages = last_data_page - first_data_page + 1;
787
788         user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
789         if (user_pages == NULL)
790                 return -ENOMEM;
791
792         down_read(&mm->mmap_sem);
793         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
794                                       num_pages, 0, 0, user_pages, NULL);
795         up_read(&mm->mmap_sem);
796         if (pinned_pages < num_pages) {
797                 ret = -EFAULT;
798                 goto out_unpin_pages;
799         }
800
801         ret = i915_mutex_lock_interruptible(dev);
802         if (ret)
803                 goto out_unpin_pages;
804
805         ret = i915_gem_object_pin(obj, 0);
806         if (ret)
807                 goto out_unlock;
808
809         ret = i915_gem_object_set_to_gtt_domain(obj, 1);
810         if (ret)
811                 goto out_unpin_object;
812
813         obj_priv = to_intel_bo(obj);
814         offset = obj_priv->gtt_offset + args->offset;
815
816         while (remain > 0) {
817                 /* Operation in this page
818                  *
819                  * gtt_page_base = page offset within aperture
820                  * gtt_page_offset = offset within page in aperture
821                  * data_page_index = page number in get_user_pages return
822                  * data_page_offset = offset within the data_page_index page.
823                  * page_length = bytes to copy for this page
824                  */
825                 gtt_page_base = offset & PAGE_MASK;
826                 gtt_page_offset = offset & ~PAGE_MASK;
827                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
828                 data_page_offset = data_ptr & ~PAGE_MASK;
829
830                 page_length = remain;
831                 if ((gtt_page_offset + page_length) > PAGE_SIZE)
832                         page_length = PAGE_SIZE - gtt_page_offset;
833                 if ((data_page_offset + page_length) > PAGE_SIZE)
834                         page_length = PAGE_SIZE - data_page_offset;
835
836                 slow_kernel_write(dev_priv->mm.gtt_mapping,
837                                   gtt_page_base, gtt_page_offset,
838                                   user_pages[data_page_index],
839                                   data_page_offset,
840                                   page_length);
841
842                 remain -= page_length;
843                 offset += page_length;
844                 data_ptr += page_length;
845         }
846
847 out_unpin_object:
848         i915_gem_object_unpin(obj);
849 out_unlock:
850         mutex_unlock(&dev->struct_mutex);
851 out_unpin_pages:
852         for (i = 0; i < pinned_pages; i++)
853                 page_cache_release(user_pages[i]);
854         drm_free_large(user_pages);
855
856         return ret;
857 }
858
859 /**
860  * This is the fast shmem pwrite path, which attempts to directly
861  * copy_from_user into the kmapped pages backing the object.
862  */
863 static int
864 i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
865                            struct drm_i915_gem_pwrite *args,
866                            struct drm_file *file_priv)
867 {
868         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
869         ssize_t remain;
870         loff_t offset, page_base;
871         char __user *user_data;
872         int page_offset, page_length;
873         int ret;
874
875         user_data = (char __user *) (uintptr_t) args->data_ptr;
876         remain = args->size;
877
878         ret = i915_mutex_lock_interruptible(dev);
879         if (ret)
880                 return ret;
881
882         ret = i915_gem_object_get_pages(obj, 0);
883         if (ret != 0)
884                 goto fail_unlock;
885
886         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
887         if (ret != 0)
888                 goto fail_put_pages;
889
890         obj_priv = to_intel_bo(obj);
891         offset = args->offset;
892         obj_priv->dirty = 1;
893
894         while (remain > 0) {
895                 /* Operation in this page
896                  *
897                  * page_base = page offset within aperture
898                  * page_offset = offset within page
899                  * page_length = bytes to copy for this page
900                  */
901                 page_base = (offset & ~(PAGE_SIZE-1));
902                 page_offset = offset & (PAGE_SIZE-1);
903                 page_length = remain;
904                 if ((page_offset + remain) > PAGE_SIZE)
905                         page_length = PAGE_SIZE - page_offset;
906
907                 ret = fast_shmem_write(obj_priv->pages,
908                                        page_base, page_offset,
909                                        user_data, page_length);
910                 if (ret)
911                         goto fail_put_pages;
912
913                 remain -= page_length;
914                 user_data += page_length;
915                 offset += page_length;
916         }
917
918 fail_put_pages:
919         i915_gem_object_put_pages(obj);
920 fail_unlock:
921         mutex_unlock(&dev->struct_mutex);
922
923         return ret;
924 }
925
926 /**
927  * This is the fallback shmem pwrite path, which uses get_user_pages to pin
928  * the memory and maps it using kmap for copying.
929  *
930  * This avoids taking mmap_sem for faulting on the user's address while the
931  * struct_mutex is held.
932  */
933 static int
934 i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
935                            struct drm_i915_gem_pwrite *args,
936                            struct drm_file *file_priv)
937 {
938         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
939         struct mm_struct *mm = current->mm;
940         struct page **user_pages;
941         ssize_t remain;
942         loff_t offset, pinned_pages, i;
943         loff_t first_data_page, last_data_page, num_pages;
944         int shmem_page_index, shmem_page_offset;
945         int data_page_index,  data_page_offset;
946         int page_length;
947         int ret;
948         uint64_t data_ptr = args->data_ptr;
949         int do_bit17_swizzling;
950
951         remain = args->size;
952
953         /* Pin the user pages containing the data.  We can't fault while
954          * holding the struct mutex, and all of the pwrite implementations
955          * want to hold it while dereferencing the user data.
956          */
957         first_data_page = data_ptr / PAGE_SIZE;
958         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
959         num_pages = last_data_page - first_data_page + 1;
960
961         user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
962         if (user_pages == NULL)
963                 return -ENOMEM;
964
965         down_read(&mm->mmap_sem);
966         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
967                                       num_pages, 0, 0, user_pages, NULL);
968         up_read(&mm->mmap_sem);
969         if (pinned_pages < num_pages) {
970                 ret = -EFAULT;
971                 goto fail_put_user_pages;
972         }
973
974         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
975
976         ret = i915_mutex_lock_interruptible(dev);
977         if (ret)
978                 goto fail_put_user_pages;
979
980         ret = i915_gem_object_get_pages_or_evict(obj);
981         if (ret)
982                 goto fail_unlock;
983
984         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
985         if (ret != 0)
986                 goto fail_put_pages;
987
988         obj_priv = to_intel_bo(obj);
989         offset = args->offset;
990         obj_priv->dirty = 1;
991
992         while (remain > 0) {
993                 /* Operation in this page
994                  *
995                  * shmem_page_index = page number within shmem file
996                  * shmem_page_offset = offset within page in shmem file
997                  * data_page_index = page number in get_user_pages return
998                  * data_page_offset = offset within the data_page_index page.
999                  * page_length = bytes to copy for this page
1000                  */
1001                 shmem_page_index = offset / PAGE_SIZE;
1002                 shmem_page_offset = offset & ~PAGE_MASK;
1003                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
1004                 data_page_offset = data_ptr & ~PAGE_MASK;
1005
1006                 page_length = remain;
1007                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
1008                         page_length = PAGE_SIZE - shmem_page_offset;
1009                 if ((data_page_offset + page_length) > PAGE_SIZE)
1010                         page_length = PAGE_SIZE - data_page_offset;
1011
1012                 if (do_bit17_swizzling) {
1013                         slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
1014                                               shmem_page_offset,
1015                                               user_pages[data_page_index],
1016                                               data_page_offset,
1017                                               page_length,
1018                                               0);
1019                 } else {
1020                         slow_shmem_copy(obj_priv->pages[shmem_page_index],
1021                                         shmem_page_offset,
1022                                         user_pages[data_page_index],
1023                                         data_page_offset,
1024                                         page_length);
1025                 }
1026
1027                 remain -= page_length;
1028                 data_ptr += page_length;
1029                 offset += page_length;
1030         }
1031
1032 fail_put_pages:
1033         i915_gem_object_put_pages(obj);
1034 fail_unlock:
1035         mutex_unlock(&dev->struct_mutex);
1036 fail_put_user_pages:
1037         for (i = 0; i < pinned_pages; i++)
1038                 page_cache_release(user_pages[i]);
1039         drm_free_large(user_pages);
1040
1041         return ret;
1042 }
1043
1044 /**
1045  * Writes data to the object referenced by handle.
1046  *
1047  * On error, the contents of the buffer that were to be modified are undefined.
1048  */
1049 int
1050 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1051                       struct drm_file *file_priv)
1052 {
1053         struct drm_i915_gem_pwrite *args = data;
1054         struct drm_gem_object *obj;
1055         struct drm_i915_gem_object *obj_priv;
1056         int ret = 0;
1057
1058         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1059         if (obj == NULL)
1060                 return -ENOENT;
1061         obj_priv = to_intel_bo(obj);
1062
1063         /* Bounds check destination. */
1064         if (args->offset > obj->size || args->size > obj->size - args->offset) {
1065                 ret = -EINVAL;
1066                 goto out;
1067         }
1068
1069         if (args->size == 0)
1070                 goto out;
1071
1072         if (!access_ok(VERIFY_READ,
1073                        (char __user *)(uintptr_t)args->data_ptr,
1074                        args->size)) {
1075                 ret = -EFAULT;
1076                 goto out;
1077         }
1078
1079         ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
1080                                       args->size);
1081         if (ret) {
1082                 ret = -EFAULT;
1083                 goto out;
1084         }
1085
1086         /* We can only do the GTT pwrite on untiled buffers, as otherwise
1087          * it would end up going through the fenced access, and we'll get
1088          * different detiling behavior between reading and writing.
1089          * pread/pwrite currently are reading and writing from the CPU
1090          * perspective, requiring manual detiling by the client.
1091          */
1092         if (obj_priv->phys_obj)
1093                 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
1094         else if (obj_priv->tiling_mode == I915_TILING_NONE &&
1095                  obj_priv->gtt_space &&
1096                  obj->write_domain != I915_GEM_DOMAIN_CPU) {
1097                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
1098                 if (ret == -EFAULT) {
1099                         ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
1100                                                        file_priv);
1101                 }
1102         } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
1103                 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
1104         } else {
1105                 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
1106                 if (ret == -EFAULT) {
1107                         ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
1108                                                          file_priv);
1109                 }
1110         }
1111
1112 #if WATCH_PWRITE
1113         if (ret)
1114                 DRM_INFO("pwrite failed %d\n", ret);
1115 #endif
1116
1117 out:
1118         drm_gem_object_unreference_unlocked(obj);
1119         return ret;
1120 }
1121
1122 /**
1123  * Called when user space prepares to use an object with the CPU, either
1124  * through the mmap ioctl's mapping or a GTT mapping.
1125  */
1126 int
1127 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1128                           struct drm_file *file_priv)
1129 {
1130         struct drm_i915_private *dev_priv = dev->dev_private;
1131         struct drm_i915_gem_set_domain *args = data;
1132         struct drm_gem_object *obj;
1133         struct drm_i915_gem_object *obj_priv;
1134         uint32_t read_domains = args->read_domains;
1135         uint32_t write_domain = args->write_domain;
1136         int ret;
1137
1138         if (!(dev->driver->driver_features & DRIVER_GEM))
1139                 return -ENODEV;
1140
1141         /* Only handle setting domains to types used by the CPU. */
1142         if (write_domain & I915_GEM_GPU_DOMAINS)
1143                 return -EINVAL;
1144
1145         if (read_domains & I915_GEM_GPU_DOMAINS)
1146                 return -EINVAL;
1147
1148         /* Having something in the write domain implies it's in the read
1149          * domain, and only that read domain.  Enforce that in the request.
1150          */
1151         if (write_domain != 0 && read_domains != write_domain)
1152                 return -EINVAL;
1153
1154         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1155         if (obj == NULL)
1156                 return -ENOENT;
1157         obj_priv = to_intel_bo(obj);
1158
1159         ret = i915_mutex_lock_interruptible(dev);
1160         if (ret) {
1161                 drm_gem_object_unreference_unlocked(obj);
1162                 return ret;
1163         }
1164
1165         intel_mark_busy(dev, obj);
1166
1167         if (read_domains & I915_GEM_DOMAIN_GTT) {
1168                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1169
1170                 /* Update the LRU on the fence for the CPU access that's
1171                  * about to occur.
1172                  */
1173                 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1174                         struct drm_i915_fence_reg *reg =
1175                                 &dev_priv->fence_regs[obj_priv->fence_reg];
1176                         list_move_tail(&reg->lru_list,
1177                                        &dev_priv->mm.fence_list);
1178                 }
1179
1180                 /* Silently promote "you're not bound, there was nothing to do"
1181                  * to success, since the client was just asking us to
1182                  * make sure everything was done.
1183                  */
1184                 if (ret == -EINVAL)
1185                         ret = 0;
1186         } else {
1187                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1188         }
1189
1190         /* Maintain LRU order of "inactive" objects */
1191         if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
1192                 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1193
1194         drm_gem_object_unreference(obj);
1195         mutex_unlock(&dev->struct_mutex);
1196         return ret;
1197 }
1198
1199 /**
1200  * Called when user space has done writes to this buffer
1201  */
1202 int
1203 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1204                       struct drm_file *file_priv)
1205 {
1206         struct drm_i915_gem_sw_finish *args = data;
1207         struct drm_gem_object *obj;
1208         int ret = 0;
1209
1210         if (!(dev->driver->driver_features & DRIVER_GEM))
1211                 return -ENODEV;
1212
1213         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1214         if (obj == NULL)
1215                 return -ENOENT;
1216
1217         ret = i915_mutex_lock_interruptible(dev);
1218         if (ret) {
1219                 drm_gem_object_unreference_unlocked(obj);
1220                 return ret;
1221         }
1222
1223         /* Pinned buffers may be scanout, so flush the cache */
1224         if (to_intel_bo(obj)->pin_count)
1225                 i915_gem_object_flush_cpu_write_domain(obj);
1226
1227         drm_gem_object_unreference(obj);
1228         mutex_unlock(&dev->struct_mutex);
1229         return ret;
1230 }
1231
1232 /**
1233  * Maps the contents of an object, returning the address it is mapped
1234  * into.
1235  *
1236  * While the mapping holds a reference on the contents of the object, it doesn't
1237  * imply a ref on the object itself.
1238  */
1239 int
1240 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1241                    struct drm_file *file_priv)
1242 {
1243         struct drm_i915_gem_mmap *args = data;
1244         struct drm_gem_object *obj;
1245         loff_t offset;
1246         unsigned long addr;
1247
1248         if (!(dev->driver->driver_features & DRIVER_GEM))
1249                 return -ENODEV;
1250
1251         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1252         if (obj == NULL)
1253                 return -ENOENT;
1254
1255         offset = args->offset;
1256
1257         down_write(&current->mm->mmap_sem);
1258         addr = do_mmap(obj->filp, 0, args->size,
1259                        PROT_READ | PROT_WRITE, MAP_SHARED,
1260                        args->offset);
1261         up_write(&current->mm->mmap_sem);
1262         drm_gem_object_unreference_unlocked(obj);
1263         if (IS_ERR((void *)addr))
1264                 return addr;
1265
1266         args->addr_ptr = (uint64_t) addr;
1267
1268         return 0;
1269 }
1270
1271 /**
1272  * i915_gem_fault - fault a page into the GTT
1273  * @vma: VMA in question
1274  * @vmf: fault info
1275  *
1276  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1277  * from userspace.  The fault handler takes care of binding the object to
1278  * the GTT (if needed), allocating and programming a fence register (again,
1279  * only if needed based on whether the old reg is still valid or the object
1280  * is tiled) and inserting a new PTE into the faulting process.
1281  *
1282  * Note that the faulting process may involve evicting existing objects
1283  * from the GTT and/or fence registers to make room.  So performance may
1284  * suffer if the GTT working set is large or there are few fence registers
1285  * left.
1286  */
1287 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1288 {
1289         struct drm_gem_object *obj = vma->vm_private_data;
1290         struct drm_device *dev = obj->dev;
1291         drm_i915_private_t *dev_priv = dev->dev_private;
1292         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1293         pgoff_t page_offset;
1294         unsigned long pfn;
1295         int ret = 0;
1296         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1297
1298         /* We don't use vmf->pgoff since that has the fake offset */
1299         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1300                 PAGE_SHIFT;
1301
1302         /* Now bind it into the GTT if needed */
1303         mutex_lock(&dev->struct_mutex);
1304         if (!obj_priv->gtt_space) {
1305                 ret = i915_gem_object_bind_to_gtt(obj, 0);
1306                 if (ret)
1307                         goto unlock;
1308
1309                 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1310                 if (ret)
1311                         goto unlock;
1312         }
1313
1314         /* Need a new fence register? */
1315         if (obj_priv->tiling_mode != I915_TILING_NONE) {
1316                 ret = i915_gem_object_get_fence_reg(obj, true);
1317                 if (ret)
1318                         goto unlock;
1319         }
1320
1321         if (i915_gem_object_is_inactive(obj_priv))
1322                 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1323
1324         pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
1325                 page_offset;
1326
1327         /* Finally, remap it using the new GTT offset */
1328         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1329 unlock:
1330         mutex_unlock(&dev->struct_mutex);
1331
1332         switch (ret) {
1333         case 0:
1334         case -ERESTARTSYS:
1335                 return VM_FAULT_NOPAGE;
1336         case -ENOMEM:
1337         case -EAGAIN:
1338                 return VM_FAULT_OOM;
1339         default:
1340                 return VM_FAULT_SIGBUS;
1341         }
1342 }
1343
1344 /**
1345  * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1346  * @obj: obj in question
1347  *
1348  * GEM memory mapping works by handing back to userspace a fake mmap offset
1349  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
1350  * up the object based on the offset and sets up the various memory mapping
1351  * structures.
1352  *
1353  * This routine allocates and attaches a fake offset for @obj.
1354  */
1355 static int
1356 i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1357 {
1358         struct drm_device *dev = obj->dev;
1359         struct drm_gem_mm *mm = dev->mm_private;
1360         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1361         struct drm_map_list *list;
1362         struct drm_local_map *map;
1363         int ret = 0;
1364
1365         /* Set the object up for mmap'ing */
1366         list = &obj->map_list;
1367         list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
1368         if (!list->map)
1369                 return -ENOMEM;
1370
1371         map = list->map;
1372         map->type = _DRM_GEM;
1373         map->size = obj->size;
1374         map->handle = obj;
1375
1376         /* Get a DRM GEM mmap offset allocated... */
1377         list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1378                                                     obj->size / PAGE_SIZE, 0, 0);
1379         if (!list->file_offset_node) {
1380                 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
1381                 ret = -ENOSPC;
1382                 goto out_free_list;
1383         }
1384
1385         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1386                                                   obj->size / PAGE_SIZE, 0);
1387         if (!list->file_offset_node) {
1388                 ret = -ENOMEM;
1389                 goto out_free_list;
1390         }
1391
1392         list->hash.key = list->file_offset_node->start;
1393         ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
1394         if (ret) {
1395                 DRM_ERROR("failed to add to map hash\n");
1396                 goto out_free_mm;
1397         }
1398
1399         /* By now we should be all set; any drm_mmap request on the offset
1400          * below will get to our mmap & fault handler */
1401         obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
1402
1403         return 0;
1404
1405 out_free_mm:
1406         drm_mm_put_block(list->file_offset_node);
1407 out_free_list:
1408         kfree(list->map);
1409
1410         return ret;
1411 }
1412
1413 /**
1414  * i915_gem_release_mmap - remove physical page mappings
1415  * @obj: obj in question
1416  *
1417  * Preserve the reservation of the mmapping with the DRM core code, but
1418  * relinquish ownership of the pages back to the system.
1419  *
1420  * It is vital that we remove the page mapping if we have mapped a tiled
1421  * object through the GTT and then lose the fence register due to
1422  * resource pressure. Similarly if the object has been moved out of the
1423  * aperture, then pages mapped into userspace must be revoked. Removing the
1424  * mapping will then trigger a page fault on the next user access, allowing
1425  * fixup by i915_gem_fault().
1426  */
1427 void
1428 i915_gem_release_mmap(struct drm_gem_object *obj)
1429 {
1430         struct drm_device *dev = obj->dev;
1431         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1432
1433         if (dev->dev_mapping)
1434                 unmap_mapping_range(dev->dev_mapping,
1435                                     obj_priv->mmap_offset, obj->size, 1);
1436 }
1437
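/* Undo i915_gem_create_mmap_offset(): remove the fake offset from the DRM
 * offset hash and release the offset-manager block.
 */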
1438 static void
1439 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1440 {
1441         struct drm_device *dev = obj->dev;
1442         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1443         struct drm_gem_mm *mm = dev->mm_private;
1444         struct drm_map_list *list;
1445
1446         list = &obj->map_list;
1447         drm_ht_remove_item(&mm->offset_hash, &list->hash);
1448
1449         if (list->file_offset_node) {
1450                 drm_mm_put_block(list->file_offset_node);
1451                 list->file_offset_node = NULL;
1452         }
1453
1454         if (list->map) {
1455                 kfree(list->map);
1456                 list->map = NULL;
1457         }
1458
1459         obj_priv->mmap_offset = 0;
1460 }
1461
1462 /**
1463  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1464  * @obj: object to check
1465  *
1466  * Return the required GTT alignment for an object, taking into account
1467  * potential fence register mapping if needed.
1468  */
1469 static uint32_t
1470 i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1471 {
1472         struct drm_device *dev = obj->dev;
1473         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1474         int start, i;
1475
1476         /*
1477          * Minimum alignment is 4k (GTT page size), but might be greater
1478          * if a fence register is needed for the object.
1479          */
1480         if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
1481                 return 4096;
1482
1483         /*
1484          * Previous chips need to be aligned to the size of the smallest
1485          * fence register that can contain the object.
1486          */
1487         if (INTEL_INFO(dev)->gen == 3)
1488                 start = 1024*1024;
1489         else
1490                 start = 512*1024;
1491
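             /* Round up to the smallest power of two that is at least the
              * minimum fence size and large enough to contain the object;
              * that is the alignment the fence register requires.
              */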
1492         for (i = start; i < obj->size; i <<= 1)
1493                 ;
1494
1495         return i;
1496 }
1497
1498 /**
1499  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1500  * @dev: DRM device
1501  * @data: GTT mapping ioctl data
1502  * @file_priv: GEM object info
1503  *
1504  * Simply returns the fake offset to userspace so it can mmap it.
1505  * The mmap call will end up in drm_gem_mmap(), which will set things
1506  * up so we can get faults in the handler above.
1507  *
1508  * The fault handler will take care of binding the object into the GTT
1509  * (since it may have been evicted to make room for something), allocating
1510  * a fence register, and mapping the appropriate aperture address into
1511  * userspace.
1512  */
1513 int
1514 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1515                         struct drm_file *file_priv)
1516 {
1517         struct drm_i915_gem_mmap_gtt *args = data;
1518         struct drm_gem_object *obj;
1519         struct drm_i915_gem_object *obj_priv;
1520         int ret;
1521
1522         if (!(dev->driver->driver_features & DRIVER_GEM))
1523                 return -ENODEV;
1524
1525         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1526         if (obj == NULL)
1527                 return -ENOENT;
1528
1529         ret = i915_mutex_lock_interruptible(dev);
1530         if (ret) {
1531                 drm_gem_object_unreference_unlocked(obj);
1532                 return ret;
1533         }
1534
1535         obj_priv = to_intel_bo(obj);
1536
1537         if (obj_priv->madv != I915_MADV_WILLNEED) {
1538                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1539                 drm_gem_object_unreference(obj);
1540                 mutex_unlock(&dev->struct_mutex);
1541                 return -EINVAL;
1542         }
1543
1544
1545         if (!obj_priv->mmap_offset) {
1546                 ret = i915_gem_create_mmap_offset(obj);
1547                 if (ret) {
1548                         drm_gem_object_unreference(obj);
1549                         mutex_unlock(&dev->struct_mutex);
1550                         return ret;
1551                 }
1552         }
1553
1554         args->offset = obj_priv->mmap_offset;
1555
1556         /*
1557          * Pull it into the GTT so that we have a page list (makes the
1558          * initial fault faster and any subsequent flushing possible).
1559          */
1560         if (!obj_priv->agp_mem) {
1561                 ret = i915_gem_object_bind_to_gtt(obj, 0);
1562                 if (ret) {
1563                         drm_gem_object_unreference(obj);
1564                         mutex_unlock(&dev->struct_mutex);
1565                         return ret;
1566                 }
1567         }
1568
1569         drm_gem_object_unreference(obj);
1570         mutex_unlock(&dev->struct_mutex);
1571
1572         return 0;
1573 }
1574
1575 static void
1576 i915_gem_object_put_pages(struct drm_gem_object *obj)
1577 {
1578         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1579         int page_count = obj->size / PAGE_SIZE;
1580         int i;
1581
1582         BUG_ON(obj_priv->pages_refcount == 0);
1583         BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
1584
1585         if (--obj_priv->pages_refcount != 0)
1586                 return;
1587
1588         if (obj_priv->tiling_mode != I915_TILING_NONE)
1589                 i915_gem_object_save_bit_17_swizzle(obj);
1590
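             /* If userspace has marked the pages as disposable, don't bother
              * writing them back out as dirty.
              */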
1591         if (obj_priv->madv == I915_MADV_DONTNEED)
1592                 obj_priv->dirty = 0;
1593
1594         for (i = 0; i < page_count; i++) {
1595                 if (obj_priv->dirty)
1596                         set_page_dirty(obj_priv->pages[i]);
1597
1598                 if (obj_priv->madv == I915_MADV_WILLNEED)
1599                         mark_page_accessed(obj_priv->pages[i]);
1600
1601                 page_cache_release(obj_priv->pages[i]);
1602         }
1603         obj_priv->dirty = 0;
1604
1605         drm_free_large(obj_priv->pages);
1606         obj_priv->pages = NULL;
1607 }
1608
1609 static uint32_t
1610 i915_gem_next_request_seqno(struct drm_device *dev,
1611                             struct intel_ring_buffer *ring)
1612 {
1613         drm_i915_private_t *dev_priv = dev->dev_private;
1614
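             /* Reserve the next seqno without emitting a request yet; the
              * request itself is emitted lazily by i915_add_request() when
              * someone actually needs to wait on it.
              */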
1615         ring->outstanding_lazy_request = true;
1616         return dev_priv->next_seqno;
1617 }
1618
1619 static void
1620 i915_gem_object_move_to_active(struct drm_gem_object *obj,
1621                                struct intel_ring_buffer *ring)
1622 {
1623         struct drm_device *dev = obj->dev;
1624         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1625         uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
1626
1627         BUG_ON(ring == NULL);
1628         obj_priv->ring = ring;
1629
1630         /* Add a reference if we're newly entering the active list. */
1631         if (!obj_priv->active) {
1632                 drm_gem_object_reference(obj);
1633                 obj_priv->active = 1;
1634         }
1635
1636         /* Move from whatever list we were on to the tail of execution. */
1637         list_move_tail(&obj_priv->list, &ring->active_list);
1638         obj_priv->last_rendering_seqno = seqno;
1639 }
1640
1641 static void
1642 i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1643 {
1644         struct drm_device *dev = obj->dev;
1645         drm_i915_private_t *dev_priv = dev->dev_private;
1646         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1647
1648         BUG_ON(!obj_priv->active);
1649         list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1650         obj_priv->last_rendering_seqno = 0;
1651 }
1652
1653 /* Immediately discard the backing storage */
1654 static void
1655 i915_gem_object_truncate(struct drm_gem_object *obj)
1656 {
1657         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1658         struct inode *inode;
1659
1660         /* Our goal here is to return as much memory as possible back
1661          * to the system, since we are called from the OOM path.
1662          * To do this we must instruct the shmfs to drop all of its
1663          * backing pages, *now*. Here we mirror the actions taken
1664          * by shmem_delete_inode() to release the backing store.
1665          */
1666         inode = obj->filp->f_path.dentry->d_inode;
1667         truncate_inode_pages(inode->i_mapping, 0);
1668         if (inode->i_op->truncate_range)
1669                 inode->i_op->truncate_range(inode, 0, (loff_t)-1);
1670
1671         obj_priv->madv = __I915_MADV_PURGED;
1672 }
1673
1674 static inline int
1675 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
1676 {
1677         return obj_priv->madv == I915_MADV_DONTNEED;
1678 }
1679
1680 static void
1681 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1682 {
1683         struct drm_device *dev = obj->dev;
1684         drm_i915_private_t *dev_priv = dev->dev_private;
1685         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1686
1687         if (obj_priv->pin_count != 0)
1688                 list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list);
1689         else
1690                 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1691
1692         BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1693
1694         obj_priv->last_rendering_seqno = 0;
1695         obj_priv->ring = NULL;
1696         if (obj_priv->active) {
1697                 obj_priv->active = 0;
1698                 drm_gem_object_unreference(obj);
1699         }
1700         WARN_ON(i915_verify_lists(dev));
1701 }
1702
1703 static void
1704 i915_gem_process_flushing_list(struct drm_device *dev,
1705                                uint32_t flush_domains,
1706                                struct intel_ring_buffer *ring)
1707 {
1708         drm_i915_private_t *dev_priv = dev->dev_private;
1709         struct drm_i915_gem_object *obj_priv, *next;
1710
1711         list_for_each_entry_safe(obj_priv, next,
1712                                  &dev_priv->mm.gpu_write_list,
1713                                  gpu_write_list) {
1714                 struct drm_gem_object *obj = &obj_priv->base;
1715
1716                 if (obj->write_domain & flush_domains &&
1717                     obj_priv->ring == ring) {
1718                         uint32_t old_write_domain = obj->write_domain;
1719
1720                         obj->write_domain = 0;
1721                         list_del_init(&obj_priv->gpu_write_list);
1722                         i915_gem_object_move_to_active(obj, ring);
1723
1724                         /* update the fence lru list */
1725                         if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1726                                 struct drm_i915_fence_reg *reg =
1727                                         &dev_priv->fence_regs[obj_priv->fence_reg];
1728                                 list_move_tail(&reg->lru_list,
1729                                                 &dev_priv->mm.fence_list);
1730                         }
1731
1732                         trace_i915_gem_object_change_domain(obj,
1733                                                             obj->read_domains,
1734                                                             old_write_domain);
1735                 }
1736         }
1737 }
1738
1739 uint32_t
1740 i915_add_request(struct drm_device *dev,
1741                  struct drm_file *file,
1742                  struct drm_i915_gem_request *request,
1743                  struct intel_ring_buffer *ring)
1744 {
1745         drm_i915_private_t *dev_priv = dev->dev_private;
1746         struct drm_i915_file_private *file_priv = NULL;
1747         uint32_t seqno;
1748         int was_empty;
1749
1750         if (file != NULL)
1751                 file_priv = file->driver_priv;
1752
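             /* A zero seqno is never valid, so a zero return from this
              * function signals allocation failure to the caller.
              */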
1753         if (request == NULL) {
1754                 request = kzalloc(sizeof(*request), GFP_KERNEL);
1755                 if (request == NULL)
1756                         return 0;
1757         }
1758
1759         seqno = ring->add_request(dev, ring, 0);
1760         ring->outstanding_lazy_request = false;
1761
1762         request->seqno = seqno;
1763         request->ring = ring;
1764         request->emitted_jiffies = jiffies;
1765         was_empty = list_empty(&ring->request_list);
1766         list_add_tail(&request->list, &ring->request_list);
1767
1768         if (file_priv) {
1769                 spin_lock(&file_priv->mm.lock);
1770                 request->file_priv = file_priv;
1771                 list_add_tail(&request->client_list,
1772                               &file_priv->mm.request_list);
1773                 spin_unlock(&file_priv->mm.lock);
1774         }
1775
1776         if (!dev_priv->mm.suspended) {
1777                 mod_timer(&dev_priv->hangcheck_timer,
1778                           jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1779                 if (was_empty)
1780                         queue_delayed_work(dev_priv->wq,
1781                                            &dev_priv->mm.retire_work, HZ);
1782         }
1783         return seqno;
1784 }
1785
1786 /**
1787  * Command execution barrier
1788  *
1789  * Ensures that all commands in the ring are finished
1790  * before signalling the CPU
1791  */
1792 static void
1793 i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
1794 {
1795         uint32_t flush_domains = 0;
1796
1797         /* The sampler always gets flushed on i965 (sigh) */
1798         if (INTEL_INFO(dev)->gen >= 4)
1799                 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1800
1801         ring->flush(dev, ring,
1802                         I915_GEM_DOMAIN_COMMAND, flush_domains);
1803 }
1804
1805 static inline void
1806 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1807 {
1808         struct drm_i915_file_private *file_priv = request->file_priv;
1809
1810         if (!file_priv)
1811                 return;
1812
1813         spin_lock(&file_priv->mm.lock);
1814         list_del(&request->client_list);
1815         request->file_priv = NULL;
1816         spin_unlock(&file_priv->mm.lock);
1817 }
1818
1819 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1820                                       struct intel_ring_buffer *ring)
1821 {
1822         while (!list_empty(&ring->request_list)) {
1823                 struct drm_i915_gem_request *request;
1824
1825                 request = list_first_entry(&ring->request_list,
1826                                            struct drm_i915_gem_request,
1827                                            list);
1828
1829                 list_del(&request->list);
1830                 i915_gem_request_remove_from_client(request);
1831                 kfree(request);
1832         }
1833
1834         while (!list_empty(&ring->active_list)) {
1835                 struct drm_i915_gem_object *obj_priv;
1836
1837                 obj_priv = list_first_entry(&ring->active_list,
1838                                             struct drm_i915_gem_object,
1839                                             list);
1840
1841                 obj_priv->base.write_domain = 0;
1842                 list_del_init(&obj_priv->gpu_write_list);
1843                 i915_gem_object_move_to_inactive(&obj_priv->base);
1844         }
1845 }
1846
1847 void i915_gem_reset(struct drm_device *dev)
1848 {
1849         struct drm_i915_private *dev_priv = dev->dev_private;
1850         struct drm_i915_gem_object *obj_priv;
1851         int i;
1852
1853         i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
1854         if (HAS_BSD(dev))
1855                 i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
1856
1857         /* Remove anything from the flushing lists. The GPU cache is likely
1858          * to be lost on reset along with the data, so simply move the
1859          * lost bo to the inactive list.
1860          */
1861         while (!list_empty(&dev_priv->mm.flushing_list)) {
1862                 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1863                                             struct drm_i915_gem_object,
1864                                             list);
1865
1866                 obj_priv->base.write_domain = 0;
1867                 list_del_init(&obj_priv->gpu_write_list);
1868                 i915_gem_object_move_to_inactive(&obj_priv->base);
1869         }
1870
1871         /* Move everything out of the GPU domains to ensure we do any
1872          * necessary invalidation upon reuse.
1873          */
1874         list_for_each_entry(obj_priv,
1875                             &dev_priv->mm.inactive_list,
1876                             list)
1877         {
1878                 obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1879         }
1880
1881         /* The fence registers are invalidated so clear them out */
1882         for (i = 0; i < 16; i++) {
1883                 struct drm_i915_fence_reg *reg;
1884
1885                 reg = &dev_priv->fence_regs[i];
1886                 if (!reg->obj)
1887                         continue;
1888
1889                 i915_gem_clear_fence_reg(reg->obj);
1890         }
1891 }
1892
1893 /**
1894  * This function clears the request list as sequence numbers are passed.
1895  */
1896 static void
1897 i915_gem_retire_requests_ring(struct drm_device *dev,
1898                               struct intel_ring_buffer *ring)
1899 {
1900         drm_i915_private_t *dev_priv = dev->dev_private;
1901         uint32_t seqno;
1902
1903         if (!ring->status_page.page_addr ||
1904             list_empty(&ring->request_list))
1905                 return;
1906
1907         WARN_ON(i915_verify_lists(dev));
1908
1909         seqno = ring->get_seqno(dev, ring);
1910         while (!list_empty(&ring->request_list)) {
1911                 struct drm_i915_gem_request *request;
1912
1913                 request = list_first_entry(&ring->request_list,
1914                                            struct drm_i915_gem_request,
1915                                            list);
1916
1917                 if (!i915_seqno_passed(seqno, request->seqno))
1918                         break;
1919
1920                 trace_i915_gem_request_retire(dev, request->seqno);
1921
1922                 list_del(&request->list);
1923                 i915_gem_request_remove_from_client(request);
1924                 kfree(request);
1925         }
1926
1927         /* Move any buffers on the active list that are no longer referenced
1928          * by the ringbuffer to the flushing/inactive lists as appropriate.
1929          */
1930         while (!list_empty(&ring->active_list)) {
1931                 struct drm_gem_object *obj;
1932                 struct drm_i915_gem_object *obj_priv;
1933
1934                 obj_priv = list_first_entry(&ring->active_list,
1935                                             struct drm_i915_gem_object,
1936                                             list);
1937
1938                 if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
1939                         break;
1940
1941                 obj = &obj_priv->base;
1942                 if (obj->write_domain != 0)
1943                         i915_gem_object_move_to_flushing(obj);
1944                 else
1945                         i915_gem_object_move_to_inactive(obj);
1946         }
1947
1948         if (unlikely (dev_priv->trace_irq_seqno &&
1949                       i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
1950                 ring->user_irq_put(dev, ring);
1951                 dev_priv->trace_irq_seqno = 0;
1952         }
1953
1954         WARN_ON(i915_verify_lists(dev));
1955 }
1956
1957 void
1958 i915_gem_retire_requests(struct drm_device *dev)
1959 {
1960         drm_i915_private_t *dev_priv = dev->dev_private;
1961
1962         if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1963             struct drm_i915_gem_object *obj_priv, *tmp;
1964
1965             /* We must be careful that during unbind() we do not
1966              * accidentally infinitely recurse into retire requests.
1967              * Currently:
1968              *   retire -> free -> unbind -> wait -> retire_ring
1969              */
1970             list_for_each_entry_safe(obj_priv, tmp,
1971                                      &dev_priv->mm.deferred_free_list,
1972                                      list)
1973                     i915_gem_free_object_tail(&obj_priv->base);
1974         }
1975
1976         i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
1977         if (HAS_BSD(dev))
1978                 i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
1979 }
1980
1981 static void
1982 i915_gem_retire_work_handler(struct work_struct *work)
1983 {
1984         drm_i915_private_t *dev_priv;
1985         struct drm_device *dev;
1986
1987         dev_priv = container_of(work, drm_i915_private_t,
1988                                 mm.retire_work.work);
1989         dev = dev_priv->dev;
1990
1991         /* Come back later if the device is busy... */
1992         if (!mutex_trylock(&dev->struct_mutex)) {
1993                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1994                 return;
1995         }
1996
1997         i915_gem_retire_requests(dev);
1998
1999         if (!dev_priv->mm.suspended &&
2000                 (!list_empty(&dev_priv->render_ring.request_list) ||
2001                         (HAS_BSD(dev) &&
2002                          !list_empty(&dev_priv->bsd_ring.request_list))))
2003                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
2004         mutex_unlock(&dev->struct_mutex);
2005 }
2006
2007 int
2008 i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
2009                      bool interruptible, struct intel_ring_buffer *ring)
2010 {
2011         drm_i915_private_t *dev_priv = dev->dev_private;
2012         u32 ier;
2013         int ret = 0;
2014
2015         BUG_ON(seqno == 0);
2016
2017         if (atomic_read(&dev_priv->mm.wedged))
2018                 return -EAGAIN;
2019
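             /* The seqno we were asked to wait for may belong to a request
              * that was only reserved lazily; emit it now so it actually
              * reaches the ring and can eventually signal.
              */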
2020         if (ring->outstanding_lazy_request) {
2021                 seqno = i915_add_request(dev, NULL, NULL, ring);
2022                 if (seqno == 0)
2023                         return -ENOMEM;
2024         }
2025         BUG_ON(seqno == dev_priv->next_seqno);
2026
2027         if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
2028                 if (HAS_PCH_SPLIT(dev))
2029                         ier = I915_READ(DEIER) | I915_READ(GTIER);
2030                 else
2031                         ier = I915_READ(IER);
2032                 if (!ier) {
2033                         DRM_ERROR("something (likely vbetool) disabled "
2034                                   "interrupts, re-enabling\n");
2035                         i915_driver_irq_preinstall(dev);
2036                         i915_driver_irq_postinstall(dev);
2037                 }
2038
2039                 trace_i915_gem_request_wait_begin(dev, seqno);
2040
2041                 ring->waiting_gem_seqno = seqno;
2042                 ring->user_irq_get(dev, ring);
2043                 if (interruptible)
2044                         ret = wait_event_interruptible(ring->irq_queue,
2045                                 i915_seqno_passed(
2046                                         ring->get_seqno(dev, ring), seqno)
2047                                 || atomic_read(&dev_priv->mm.wedged));
2048                 else
2049                         wait_event(ring->irq_queue,
2050                                 i915_seqno_passed(
2051                                         ring->get_seqno(dev, ring), seqno)
2052                                 || atomic_read(&dev_priv->mm.wedged));
2053
2054                 ring->user_irq_put(dev, ring);
2055                 ring->waiting_gem_seqno = 0;
2056
2057                 trace_i915_gem_request_wait_end(dev, seqno);
2058         }
2059         if (atomic_read(&dev_priv->mm.wedged))
2060                 ret = -EAGAIN;
2061
2062         if (ret && ret != -ERESTARTSYS)
2063                 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
2064                           __func__, ret, seqno, ring->get_seqno(dev, ring),
2065                           dev_priv->next_seqno);
2066
2067         /* Directly dispatch request retiring.  While we have the work queue
2068          * to handle this, the waiter on a request often wants an associated
2069          * buffer to have made it to the inactive list, and we would need
2070          * a separate wait queue to handle that.
2071          */
2072         if (ret == 0)
2073                 i915_gem_retire_requests_ring(dev, ring);
2074
2075         return ret;
2076 }
2077
2078 /**
2079  * Waits for a sequence number to be signaled, and cleans up the
2080  * request and object lists appropriately for that event.
2081  */
2082 static int
2083 i915_wait_request(struct drm_device *dev, uint32_t seqno,
2084                   struct intel_ring_buffer *ring)
2085 {
2086         return i915_do_wait_request(dev, seqno, 1, ring);
2087 }
2088
2089 static void
2090 i915_gem_flush_ring(struct drm_device *dev,
2091                     struct drm_file *file_priv,
2092                     struct intel_ring_buffer *ring,
2093                     uint32_t invalidate_domains,
2094                     uint32_t flush_domains)
2095 {
2096         ring->flush(dev, ring, invalidate_domains, flush_domains);
2097         i915_gem_process_flushing_list(dev, flush_domains, ring);
2098 }
2099
2100 static void
2101 i915_gem_flush(struct drm_device *dev,
2102                struct drm_file *file_priv,
2103                uint32_t invalidate_domains,
2104                uint32_t flush_domains,
2105                uint32_t flush_rings)
2106 {
2107         drm_i915_private_t *dev_priv = dev->dev_private;
2108
2109         if (flush_domains & I915_GEM_DOMAIN_CPU)
2110                 drm_agp_chipset_flush(dev);
2111
2112         if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
2113                 if (flush_rings & RING_RENDER)
2114                         i915_gem_flush_ring(dev, file_priv,
2115                                             &dev_priv->render_ring,
2116                                             invalidate_domains, flush_domains);
2117                 if (flush_rings & RING_BSD)
2118                         i915_gem_flush_ring(dev, file_priv,
2119                                             &dev_priv->bsd_ring,
2120                                             invalidate_domains, flush_domains);
2121         }
2122 }
2123
2124 /**
2125  * Ensures that all rendering to the object has completed and the object is
2126  * safe to unbind from the GTT or access from the CPU.
2127  */
2128 static int
2129 i915_gem_object_wait_rendering(struct drm_gem_object *obj,
2130                                bool interruptible)
2131 {
2132         struct drm_device *dev = obj->dev;
2133         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2134         int ret;
2135
2136         /* This function only exists to support waiting for existing rendering,
2137          * not for emitting required flushes.
2138          */
2139         BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
2140
2141         /* If there is rendering queued on the buffer being evicted, wait for
2142          * it.
2143          */
2144         if (obj_priv->active) {
2145                 ret = i915_do_wait_request(dev,
2146                                            obj_priv->last_rendering_seqno,
2147                                            interruptible,
2148                                            obj_priv->ring);
2149                 if (ret)
2150                         return ret;
2151         }
2152
2153         return 0;
2154 }
2155
2156 /**
2157  * Unbinds an object from the GTT aperture.
2158  */
2159 int
2160 i915_gem_object_unbind(struct drm_gem_object *obj)
2161 {
2162         struct drm_device *dev = obj->dev;
2163         struct drm_i915_private *dev_priv = dev->dev_private;
2164         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2165         int ret = 0;
2166
2167         if (obj_priv->gtt_space == NULL)
2168                 return 0;
2169
2170         if (obj_priv->pin_count != 0) {
2171                 DRM_ERROR("Attempting to unbind pinned buffer\n");
2172                 return -EINVAL;
2173         }
2174
2175         /* blow away mappings if mapped through GTT */
2176         i915_gem_release_mmap(obj);
2177
2178         /* Move the object to the CPU domain to ensure that
2179          * any possible CPU writes while it's not in the GTT
2180          * are flushed when we go to remap it. This will
2181          * also ensure that all pending GPU writes are finished
2182          * before we unbind.
2183          */
2184         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2185         if (ret == -ERESTARTSYS)
2186                 return ret;
2187         /* Continue on if we fail due to EIO; the GPU is hung, so we
2188          * should be safe, and we need to clean up or else we might
2189          * cause memory corruption through use-after-free.
2190          */
2191         if (ret) {
2192                 i915_gem_clflush_object(obj);
2193                 obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
2194         }
2195
2196         /* release the fence reg _after_ flushing */
2197         if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2198                 i915_gem_clear_fence_reg(obj);
2199
2200         drm_unbind_agp(obj_priv->agp_mem);
2201         drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
2202
2203         i915_gem_object_put_pages(obj);
2204         BUG_ON(obj_priv->pages_refcount);
2205
2206         i915_gem_info_remove_gtt(dev_priv, obj->size);
2207         list_del_init(&obj_priv->list);
2208
2209         drm_mm_put_block(obj_priv->gtt_space);
2210         obj_priv->gtt_space = NULL;
2211
2212         if (i915_gem_object_is_purgeable(obj_priv))
2213                 i915_gem_object_truncate(obj);
2214
2215         trace_i915_gem_object_unbind(obj);
2216
2217         return ret;
2218 }
2219
2220 static int i915_ring_idle(struct drm_device *dev,
2221                           struct intel_ring_buffer *ring)
2222 {
2223         i915_gem_flush_ring(dev, NULL, ring,
2224                             I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2225         return i915_wait_request(dev,
2226                                  i915_gem_next_request_seqno(dev, ring),
2227                                  ring);
2228 }
2229
2230 int
2231 i915_gpu_idle(struct drm_device *dev)
2232 {
2233         drm_i915_private_t *dev_priv = dev->dev_private;
2234         bool lists_empty;
2235         int ret;
2236
2237         lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2238                        list_empty(&dev_priv->render_ring.active_list) &&
2239                        (!HAS_BSD(dev) ||
2240                         list_empty(&dev_priv->bsd_ring.active_list)));
2241         if (lists_empty)
2242                 return 0;
2243
2244         /* Flush everything onto the inactive list. */
2245         ret = i915_ring_idle(dev, &dev_priv->render_ring);
2246         if (ret)
2247                 return ret;
2248
2249         if (HAS_BSD(dev)) {
2250                 ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
2251                 if (ret)
2252                         return ret;
2253         }
2254
2255         return 0;
2256 }
2257
2258 static int
2259 i915_gem_object_get_pages(struct drm_gem_object *obj,
2260                           gfp_t gfpmask)
2261 {
2262         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2263         int page_count, i;
2264         struct address_space *mapping;
2265         struct inode *inode;
2266         struct page *page;
2267
2268         BUG_ON(obj_priv->pages_refcount
2269                         == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
2270
2271         if (obj_priv->pages_refcount++ != 0)
2272                 return 0;
2273
2274         /* Get the list of pages out of our struct file.  They'll be pinned
2275          * at this point until we release them.
2276          */
2277         page_count = obj->size / PAGE_SIZE;
2278         BUG_ON(obj_priv->pages != NULL);
2279         obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
2280         if (obj_priv->pages == NULL) {
2281                 obj_priv->pages_refcount--;
2282                 return -ENOMEM;
2283         }
2284
2285         inode = obj->filp->f_path.dentry->d_inode;
2286         mapping = inode->i_mapping;
2287         for (i = 0; i < page_count; i++) {
2288                 page = read_cache_page_gfp(mapping, i,
2289                                            GFP_HIGHUSER |
2290                                            __GFP_COLD |
2291                                            __GFP_RECLAIMABLE |
2292                                            gfpmask);
2293                 if (IS_ERR(page))
2294                         goto err_pages;
2295
2296                 obj_priv->pages[i] = page;
2297         }
2298
2299         if (obj_priv->tiling_mode != I915_TILING_NONE)
2300                 i915_gem_object_do_bit_17_swizzle(obj);
2301
2302         return 0;
2303
2304 err_pages:
2305         while (i--)
2306                 page_cache_release(obj_priv->pages[i]);
2307
2308         drm_free_large(obj_priv->pages);
2309         obj_priv->pages = NULL;
2310         obj_priv->pages_refcount--;
2311         return PTR_ERR(page);
2312 }
2313
2314 static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
2315 {
2316         struct drm_gem_object *obj = reg->obj;
2317         struct drm_device *dev = obj->dev;
2318         drm_i915_private_t *dev_priv = dev->dev_private;
2319         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2320         int regnum = obj_priv->fence_reg;
2321         uint64_t val;
2322
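             /* The 64-bit fence value encodes the last page of the object in
              * the upper dword, the first page in the lower dword, the pitch
              * in 128-byte units (minus one), the Y-tiling bit and the valid bit.
              */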
2323         val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2324                     0xfffff000) << 32;
2325         val |= obj_priv->gtt_offset & 0xfffff000;
2326         val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
2327                 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2328
2329         if (obj_priv->tiling_mode == I915_TILING_Y)
2330                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2331         val |= I965_FENCE_REG_VALID;
2332
2333         I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
2334 }
2335
2336 static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2337 {
2338         struct drm_gem_object *obj = reg->obj;
2339         struct drm_device *dev = obj->dev;
2340         drm_i915_private_t *dev_priv = dev->dev_private;
2341         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2342         int regnum = obj_priv->fence_reg;
2343         uint64_t val;
2344
2345         val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2346                     0xfffff000) << 32;
2347         val |= obj_priv->gtt_offset & 0xfffff000;
2348         val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2349         if (obj_priv->tiling_mode == I915_TILING_Y)
2350                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2351         val |= I965_FENCE_REG_VALID;
2352
2353         I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2354 }
2355
2356 static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2357 {
2358         struct drm_gem_object *obj = reg->obj;
2359         struct drm_device *dev = obj->dev;
2360         drm_i915_private_t *dev_priv = dev->dev_private;
2361         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2362         int regnum = obj_priv->fence_reg;
2363         int tile_width;
2364         uint32_t fence_reg, val;
2365         uint32_t pitch_val;
2366
2367         if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2368             (obj_priv->gtt_offset & (obj->size - 1))) {
2369                 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
2370                      __func__, obj_priv->gtt_offset, obj->size);
2371                 return;
2372         }
2373
2374         if (obj_priv->tiling_mode == I915_TILING_Y &&
2375             HAS_128_BYTE_Y_TILING(dev))
2376                 tile_width = 128;
2377         else
2378                 tile_width = 512;
2379
2380         /* Note: the pitch must be a power-of-two number of tile widths */
2381         pitch_val = obj_priv->stride / tile_width;
2382         pitch_val = ffs(pitch_val) - 1;
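             /* pitch_val is now log2 of the pitch in tile widths, which is
              * the encoding the fence register expects.
              */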
2383
2384         if (obj_priv->tiling_mode == I915_TILING_Y &&
2385             HAS_128_BYTE_Y_TILING(dev))
2386                 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2387         else
2388                 WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
2389
2390         val = obj_priv->gtt_offset;
2391         if (obj_priv->tiling_mode == I915_TILING_Y)
2392                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2393         val |= I915_FENCE_SIZE_BITS(obj->size);
2394         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2395         val |= I830_FENCE_REG_VALID;
2396
2397         if (regnum < 8)
2398                 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2399         else
2400                 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2401         I915_WRITE(fence_reg, val);
2402 }
2403
2404 static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2405 {
2406         struct drm_gem_object *obj = reg->obj;
2407         struct drm_device *dev = obj->dev;
2408         drm_i915_private_t *dev_priv = dev->dev_private;
2409         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2410         int regnum = obj_priv->fence_reg;
2411         uint32_t val;
2412         uint32_t pitch_val;
2413         uint32_t fence_size_bits;
2414
2415         if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
2416             (obj_priv->gtt_offset & (obj->size - 1))) {
2417                 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
2418                      __func__, obj_priv->gtt_offset);
2419                 return;
2420         }
2421
2422         pitch_val = obj_priv->stride / 128;
2423         pitch_val = ffs(pitch_val) - 1;
2424         WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2425
2426         val = obj_priv->gtt_offset;
2427         if (obj_priv->tiling_mode == I915_TILING_Y)
2428                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2429         fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2430         WARN_ON(fence_size_bits & ~0x00000f00);
2431         val |= fence_size_bits;
2432         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2433         val |= I830_FENCE_REG_VALID;
2434
2435         I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
2436 }
2437
2438 static int i915_find_fence_reg(struct drm_device *dev,
2439                                bool interruptible)
2440 {
2441         struct drm_i915_fence_reg *reg = NULL;
2442         struct drm_i915_gem_object *obj_priv = NULL;
2443         struct drm_i915_private *dev_priv = dev->dev_private;
2444         struct drm_gem_object *obj = NULL;
2445         int i, avail, ret;
2446
2447         /* First try to find a free reg */
2448         avail = 0;
2449         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2450                 reg = &dev_priv->fence_regs[i];
2451                 if (!reg->obj)
2452                         return i;
2453
2454                 obj_priv = to_intel_bo(reg->obj);
2455                 if (!obj_priv->pin_count)
2456                     avail++;
2457         }
2458
2459         if (avail == 0)
2460                 return -ENOSPC;
2461
2462         /* None available, try to steal one or wait for a user to finish */
2463         i = I915_FENCE_REG_NONE;
2464         list_for_each_entry(reg, &dev_priv->mm.fence_list,
2465                             lru_list) {
2466                 obj = reg->obj;
2467                 obj_priv = to_intel_bo(obj);
2468
2469                 if (obj_priv->pin_count)
2470                         continue;
2471
2472                 /* found one! */
2473                 i = obj_priv->fence_reg;
2474                 break;
2475         }
2476
2477         BUG_ON(i == I915_FENCE_REG_NONE);
2478
2479         /* We only have a reference on obj from the active list. put_fence_reg
2480          * might drop that reference, causing a use-after-free of obj. So hold
2481          * a private reference to obj, like the other callers of put_fence_reg
2482          * (the set_tiling ioctl) do. */
2483         drm_gem_object_reference(obj);
2484         ret = i915_gem_object_put_fence_reg(obj, interruptible);
2485         drm_gem_object_unreference(obj);
2486         if (ret != 0)
2487                 return ret;
2488
2489         return i;
2490 }
2491
2492 /**
2493  * i915_gem_object_get_fence_reg - set up a fence reg for an object
2494  * @obj: object to map through a fence reg
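      * @interruptible: whether the wait for a free fence register may be interrupted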
2495  *
2496  * When mapping objects through the GTT, userspace wants to be able to write
2497  * to them without having to worry about swizzling if the object is tiled.
2498  *
2499  * This function walks the fence regs looking for a free one for @obj,
2500  * stealing one if it can't find any.
2501  *
2502  * It then sets up the reg based on the object's properties: address, pitch
2503  * and tiling format.
2504  */
2505 int
2506 i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
2507                               bool interruptible)
2508 {
2509         struct drm_device *dev = obj->dev;
2510         struct drm_i915_private *dev_priv = dev->dev_private;
2511         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2512         struct drm_i915_fence_reg *reg = NULL;
2513         int ret;
2514
2515         /* Just update our place in the LRU if our fence is getting used. */
2516         if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
2517                 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2518                 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2519                 return 0;
2520         }
2521
2522         switch (obj_priv->tiling_mode) {
2523         case I915_TILING_NONE:
2524                 WARN(1, "allocating a fence for non-tiled object?\n");
2525                 break;
2526         case I915_TILING_X:
2527                 if (!obj_priv->stride)
2528                         return -EINVAL;
2529                 WARN((obj_priv->stride & (512 - 1)),
2530                      "object 0x%08x is X tiled but has non-512B pitch\n",
2531                      obj_priv->gtt_offset);
2532                 break;
2533         case I915_TILING_Y:
2534                 if (!obj_priv->stride)
2535                         return -EINVAL;
2536                 WARN((obj_priv->stride & (128 - 1)),
2537                      "object 0x%08x is Y tiled but has non-128B pitch\n",
2538                      obj_priv->gtt_offset);
2539                 break;
2540         }
2541
2542         ret = i915_find_fence_reg(dev, interruptible);
2543         if (ret < 0)
2544                 return ret;
2545
2546         obj_priv->fence_reg = ret;
2547         reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2548         list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2549
2550         reg->obj = obj;
2551
2552         switch (INTEL_INFO(dev)->gen) {
2553         case 6:
2554                 sandybridge_write_fence_reg(reg);
2555                 break;
2556         case 5:
2557         case 4:
2558                 i965_write_fence_reg(reg);
2559                 break;
2560         case 3:
2561                 i915_write_fence_reg(reg);
2562                 break;
2563         case 2:
2564                 i830_write_fence_reg(reg);
2565                 break;
2566         }
2567
2568         trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
2569                         obj_priv->tiling_mode);
2570
2571         return 0;
2572 }
2573
2574 /**
2575  * i915_gem_clear_fence_reg - clear out fence register info
2576  * @obj: object to clear
2577  *
2578  * Zeroes out the fence register itself and clears out the associated
2579  * data structures in dev_priv and obj_priv.
2580  */
2581 static void
2582 i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2583 {
2584         struct drm_device *dev = obj->dev;
2585         drm_i915_private_t *dev_priv = dev->dev_private;
2586         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2587         struct drm_i915_fence_reg *reg =
2588                 &dev_priv->fence_regs[obj_priv->fence_reg];
2589         uint32_t fence_reg;
2590
2591         switch (INTEL_INFO(dev)->gen) {
2592         case 6:
2593                 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2594                              (obj_priv->fence_reg * 8), 0);
2595                 break;
2596         case 5:
2597         case 4:
2598                 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2599                 break;
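             /* Note the unusual construct below: the "case 2" label sits inside
              * the else branch of case 3, so gen2 and the low fence registers on
              * gen3 share the same FENCE_REG_830_0 calculation and write.
              */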
2600         case 3:
2601                 if (obj_priv->fence_reg >= 8)
2602                         fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
2603                 else
2604         case 2:
2605                         fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2606
2607                 I915_WRITE(fence_reg, 0);
2608                 break;
2609         }
2610
2611         reg->obj = NULL;
2612         obj_priv->fence_reg = I915_FENCE_REG_NONE;
2613         list_del_init(&reg->lru_list);
2614 }
2615
2616 /**
2617  * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2618  * to the buffer to finish, and then resets the fence register.
2619  * @obj: tiled object holding a fence register.
2620  * @interruptible: whether the wait upon the fence is interruptible
2621  *
2622  * Zeroes out the fence register itself and clears out the associated
2623  * data structures in dev_priv and obj_priv.
2624  */
2625 int
2626 i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
2627                               bool interruptible)
2628 {
2629         struct drm_device *dev = obj->dev;
2630         struct drm_i915_private *dev_priv = dev->dev_private;
2631         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2632         struct drm_i915_fence_reg *reg;
2633
2634         if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2635                 return 0;
2636
2637         /* If we've changed tiling, GTT-mappings of the object
2638          * need to re-fault to ensure that the correct fence register
2639          * setup is in place.
2640          */
2641         i915_gem_release_mmap(obj);
2642
2643         /* On the i915, GPU access to tiled buffers is via a fence,
2644          * therefore we must wait for any outstanding access to complete
2645          * before clearing the fence.
2646          */
2647         reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2648         if (reg->gpu) {
2649                 int ret;
2650
2651                 ret = i915_gem_object_flush_gpu_write_domain(obj, true);
2652                 if (ret)
2653                         return ret;
2654
2655                 ret = i915_gem_object_wait_rendering(obj, interruptible);
2656                 if (ret)
2657                         return ret;
2658
2659                 reg->gpu = false;
2660         }
2661
2662         i915_gem_object_flush_gtt_write_domain(obj);
2663         i915_gem_clear_fence_reg(obj);
2664
2665         return 0;
2666 }
2667
2668 /**
2669  * Finds free space in the GTT aperture and binds the object there.
2670  */
2671 static int
2672 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2673 {
2674         struct drm_device *dev = obj->dev;
2675         drm_i915_private_t *dev_priv = dev->dev_private;
2676         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2677         struct drm_mm_node *free_space;
2678         gfp_t gfpmask =  __GFP_NORETRY | __GFP_NOWARN;
2679         int ret;
2680
2681         if (obj_priv->madv != I915_MADV_WILLNEED) {
2682                 DRM_ERROR("Attempting to bind a purgeable object\n");
2683                 return -EINVAL;
2684         }
2685
2686         if (alignment == 0)
2687                 alignment = i915_gem_get_gtt_alignment(obj);
2688         if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
2689                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2690                 return -EINVAL;
2691         }
2692
2693         /* If the object is bigger than the entire aperture, reject it early
2694          * before evicting everything in a vain attempt to find space.
2695          */
2696         if (obj->size > dev_priv->mm.gtt_total) {
2697                 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2698                 return -E2BIG;
2699         }
2700
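             /* Look for free space in the GTT; if none is found, evict
              * something and retry. If page allocation fails we retry once
              * more with a relaxed gfp mask (dropping NORETRY/NOWARN).
              */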
2701  search_free:
2702         free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2703                                         obj->size, alignment, 0);
2704         if (free_space != NULL) {
2705                 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2706                                                        alignment);
2707                 if (obj_priv->gtt_space != NULL)
2708                         obj_priv->gtt_offset = obj_priv->gtt_space->start;
2709         }
2710         if (obj_priv->gtt_space == NULL) {
2711                 /* If the gtt is empty and we're still having trouble
2712                  * fitting our object in, we're out of memory.
2713                  */
2714                 ret = i915_gem_evict_something(dev, obj->size, alignment);
2715                 if (ret)
2716                         return ret;
2717
2718                 goto search_free;
2719         }
2720
2721         ret = i915_gem_object_get_pages(obj, gfpmask);
2722         if (ret) {
2723                 drm_mm_put_block(obj_priv->gtt_space);
2724                 obj_priv->gtt_space = NULL;
2725
2726                 if (ret == -ENOMEM) {
2727                         /* first try to clear up some space from the GTT */
2728                         ret = i915_gem_evict_something(dev, obj->size,
2729                                                        alignment);
2730                         if (ret) {
2731                                 /* now try to shrink everyone else */
2732                                 if (gfpmask) {
2733                                         gfpmask = 0;
2734                                         goto search_free;
2735                                 }
2736
2737                                 return ret;
2738                         }
2739
2740                         goto search_free;
2741                 }
2742
2743                 return ret;
2744         }
2745
2746         /* Create an AGP memory structure pointing at our pages, and bind it
2747          * into the GTT.
2748          */
2749         obj_priv->agp_mem = drm_agp_bind_pages(dev,
2750                                                obj_priv->pages,
2751                                                obj->size >> PAGE_SHIFT,
2752                                                obj_priv->gtt_offset,
2753                                                obj_priv->agp_type);
2754         if (obj_priv->agp_mem == NULL) {
2755                 i915_gem_object_put_pages(obj);
2756                 drm_mm_put_block(obj_priv->gtt_space);
2757                 obj_priv->gtt_space = NULL;
2758
2759                 ret = i915_gem_evict_something(dev, obj->size, alignment);
2760                 if (ret)
2761                         return ret;
2762
2763                 goto search_free;
2764         }
2765
2766         /* keep track of the bound object by adding it to the inactive list */
2767         list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
2768         i915_gem_info_add_gtt(dev_priv, obj->size);
2769
2770         /* Assert that the object is not currently in any GPU domain. As it
2771          * wasn't in the GTT, there shouldn't be any way it could have been in
2772          * a GPU cache
2773          */
2774         BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2775         BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2776
2777         trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
2778
2779         return 0;
2780 }
2781
2782 void
2783 i915_gem_clflush_object(struct drm_gem_object *obj)
2784 {
2785         struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
2786
2787         /* If we don't have a page list set up, then we're not pinned
2788          * to GPU, and we can ignore the cache flush because it'll happen
2789          * again at bind time.
2790          */
2791         if (obj_priv->pages == NULL)
2792                 return;
2793
2794         trace_i915_gem_object_clflush(obj);
2795
2796         drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
2797 }
2798
2799 /** Flushes any GPU write domain for the object if it's dirty. */
2800 static int
2801 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
2802                                        bool pipelined)
2803 {
2804         struct drm_device *dev = obj->dev;
2805         uint32_t old_write_domain;
2806
2807         if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2808                 return 0;
2809
2810         /* Queue the GPU write cache flushing we need. */
2811         old_write_domain = obj->write_domain;
2812         i915_gem_flush_ring(dev, NULL,
2813                             to_intel_bo(obj)->ring,
2814                             0, obj->write_domain);
2815         BUG_ON(obj->write_domain);
2816
2817         trace_i915_gem_object_change_domain(obj,
2818                                             obj->read_domains,
2819                                             old_write_domain);
2820
2821         if (pipelined)
2822                 return 0;
2823
2824         return i915_gem_object_wait_rendering(obj, true);
2825 }
2826
2827 /** Flushes the GTT write domain for the object if it's dirty. */
2828 static void
2829 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2830 {
2831         uint32_t old_write_domain;
2832
2833         if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2834                 return;
2835
2836         /* No actual flushing is required for the GTT write domain.   Writes
2837          * to it immediately go to main memory as far as we know, so there's
2838          * no chipset flush.  It also doesn't land in render cache.
2839          */
2840         old_write_domain = obj->write_domain;
2841         obj->write_domain = 0;
2842
2843         trace_i915_gem_object_change_domain(obj,
2844                                             obj->read_domains,
2845                                             old_write_domain);
2846 }
2847
2848 /** Flushes the CPU write domain for the object if it's dirty. */
2849 static void
2850 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2851 {
2852         struct drm_device *dev = obj->dev;
2853         uint32_t old_write_domain;
2854
2855         if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2856                 return;
2857
2858         i915_gem_clflush_object(obj);
2859         drm_agp_chipset_flush(dev);
2860         old_write_domain = obj->write_domain;
2861         obj->write_domain = 0;
2862
2863         trace_i915_gem_object_change_domain(obj,
2864                                             obj->read_domains,
2865                                             old_write_domain);
2866 }
2867
2868 /**
2869  * Moves a single object to the GTT read, and possibly write domain.
2870  *
2871  * This function returns when the move is complete, including waiting on
2872  * flushes to occur.
2873  */
2874 int
2875 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2876 {
2877         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2878         uint32_t old_write_domain, old_read_domains;
2879         int ret;
2880
2881         /* Not valid to be called on unbound objects. */
2882         if (obj_priv->gtt_space == NULL)
2883                 return -EINVAL;
2884
2885         ret = i915_gem_object_flush_gpu_write_domain(obj, false);
2886         if (ret != 0)
2887                 return ret;
2888
2889         i915_gem_object_flush_cpu_write_domain(obj);
2890
2891         if (write) {
2892                 ret = i915_gem_object_wait_rendering(obj, true);
2893                 if (ret)
2894                         return ret;
2895         }
2896
2897         old_write_domain = obj->write_domain;
2898         old_read_domains = obj->read_domains;
2899
2900         /* It should now be out of any other write domains, and we can update
2901          * the domain values for our changes.
2902          */
2903         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2904         obj->read_domains |= I915_GEM_DOMAIN_GTT;
2905         if (write) {
2906                 obj->read_domains = I915_GEM_DOMAIN_GTT;
2907                 obj->write_domain = I915_GEM_DOMAIN_GTT;
2908                 obj_priv->dirty = 1;
2909         }
2910
2911         trace_i915_gem_object_change_domain(obj,
2912                                             old_read_domains,
2913                                             old_write_domain);
2914
2915         return 0;
2916 }
2917
2918 /*
2919  * Prepare the buffer for use as a display plane. Use an uninterruptible wait
2920  * for any required flush, since we must not be interrupted during modesetting.
2921  */
2922 int
2923 i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
2924                                      bool pipelined)
2925 {
2926         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2927         uint32_t old_read_domains;
2928         int ret;
2929
2930         /* Not valid to be called on unbound objects. */
2931         if (obj_priv->gtt_space == NULL)
2932                 return -EINVAL;
2933
2934         ret = i915_gem_object_flush_gpu_write_domain(obj, true);
2935         if (ret)
2936                 return ret;
2937
2938         /* Currently, we are always called from a non-interruptible context. */
2939         if (!pipelined) {
2940                 ret = i915_gem_object_wait_rendering(obj, false);
2941                 if (ret)
2942                         return ret;
2943         }
2944
2945         i915_gem_object_flush_cpu_write_domain(obj);
2946
2947         old_read_domains = obj->read_domains;
2948         obj->read_domains |= I915_GEM_DOMAIN_GTT;
2949
2950         trace_i915_gem_object_change_domain(obj,
2951                                             old_read_domains,
2952                                             obj->write_domain);
2953
2954         return 0;
2955 }
2956
2957 /**
2958  * Moves a single object to the CPU read, and possibly write, domain.
2959  *
2960  * This function returns when the move is complete, including waiting on
2961  * flushes to occur.
2962  */
2963 static int
2964 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2965 {
2966         uint32_t old_write_domain, old_read_domains;
2967         int ret;
2968
2969         ret = i915_gem_object_flush_gpu_write_domain(obj, false);
2970         if (ret != 0)
2971                 return ret;
2972
2973         i915_gem_object_flush_gtt_write_domain(obj);
2974
2975         /* If we have a partially-valid cache of the object in the CPU,
2976          * finish invalidating it and free the per-page flags.
2977          */
2978         i915_gem_object_set_to_full_cpu_read_domain(obj);
2979
2980         if (write) {
2981                 ret = i915_gem_object_wait_rendering(obj, true);
2982                 if (ret)
2983                         return ret;
2984         }
2985
2986         old_write_domain = obj->write_domain;
2987         old_read_domains = obj->read_domains;
2988
2989         /* Flush the CPU cache if it's still invalid. */
2990         if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2991                 i915_gem_clflush_object(obj);
2992
2993                 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2994         }
2995
2996         /* It should now be out of any other write domains, and we can update
2997          * the domain values for our changes.
2998          */
2999         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3000
3001         /* If we're writing through the CPU, then the GPU read domains will
3002          * need to be invalidated at next use.
3003          */
3004         if (write) {
3005                 obj->read_domains = I915_GEM_DOMAIN_CPU;
3006                 obj->write_domain = I915_GEM_DOMAIN_CPU;
3007         }
3008
3009         trace_i915_gem_object_change_domain(obj,
3010                                             old_read_domains,
3011                                             old_write_domain);
3012
3013         return 0;
3014 }
3015
3016 /*
3017  * Set the next domain for the specified object. This
3018  * may not actually perform the necessary flushing/invalidating though,
3019  * as that may want to be batched with other set_domain operations.
3020  *
3021  * This is (we hope) the only really tricky part of gem. The goal
3022  * is fairly simple -- track which caches hold bits of the object
3023  * and make sure they remain coherent. A few concrete examples may
3024  * help to explain how it works. For shorthand, we use the notation
3025  * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
3026  * a pair of read and write domain masks.
3027  *
3028  * Case 1: the batch buffer
3029  *
3030  *      1. Allocated
3031  *      2. Written by CPU
3032  *      3. Mapped to GTT
3033  *      4. Read by GPU
3034  *      5. Unmapped from GTT
3035  *      6. Freed
3036  *
3037  *      Let's take these a step at a time
3038  *
3039  *      1. Allocated
3040  *              Pages allocated from the kernel may still have
3041  *              cache contents, so we set them to (CPU, CPU) always.
3042  *      2. Written by CPU (using pwrite)
3043  *              The pwrite function calls set_domain (CPU, CPU) and
3044  *              this function does nothing (as nothing changes)
3045  *      3. Mapped to GTT
3046  *              This function asserts that the object is not
3047  *              currently in any GPU-based read or write domains
3048  *      4. Read by GPU
3049  *              i915_gem_execbuffer calls set_domain (COMMAND, 0).
3050  *              As write_domain is zero, this function adds in the
3051  *              current read domains (CPU+COMMAND, 0).
3052  *              flush_domains is set to CPU.
3053  *              invalidate_domains is set to COMMAND
3054  *              clflush is run to get data out of the CPU caches
3055  *              then i915_dev_set_domain calls i915_gem_flush to
3056  *              emit an MI_FLUSH and drm_agp_chipset_flush
3057  *      5. Unmapped from GTT
3058  *              i915_gem_object_unbind calls set_domain (CPU, CPU)
3059  *              flush_domains and invalidate_domains end up both zero
3060  *              so no flushing/invalidating happens
3061  *      6. Freed
3062  *              yay, done
3063  *
3064  * Case 2: The shared render buffer
3065  *
3066  *      1. Allocated
3067  *      2. Mapped to GTT
3068  *      3. Read/written by GPU
3069  *      4. set_domain to (CPU,CPU)
3070  *      5. Read/written by CPU
3071  *      6. Read/written by GPU
3072  *
3073  *      1. Allocated
3074  *              Same as last example, (CPU, CPU)
3075  *      2. Mapped to GTT
3076  *              Nothing changes (assertions find that it is not in the GPU)
3077  *      3. Read/written by GPU
3078  *              execbuffer calls set_domain (RENDER, RENDER)
3079  *              flush_domains gets CPU
3080  *              invalidate_domains gets GPU
3081  *              clflush (obj)
3082  *              MI_FLUSH and drm_agp_chipset_flush
3083  *      4. set_domain (CPU, CPU)
3084  *              flush_domains gets GPU
3085  *              invalidate_domains gets CPU
3086  *              wait_rendering (obj) to make sure all drawing is complete.
3087  *              This will include an MI_FLUSH to get the data from GPU
3088  *              to memory
3089  *              clflush (obj) to invalidate the CPU cache
3090  *              Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
3091  *      5. Read/written by CPU
3092  *              cache lines are loaded and dirtied
3093  *      6. Read/written by GPU
3094  *              Same as last GPU access
3095  *
3096  * Case 3: The constant buffer
3097  *
3098  *      1. Allocated
3099  *      2. Written by CPU
3100  *      3. Read by GPU
3101  *      4. Updated (written) by CPU again
3102  *      5. Read by GPU
3103  *
3104  *      1. Allocated
3105  *              (CPU, CPU)
3106  *      2. Written by CPU
3107  *              (CPU, CPU)
3108  *      3. Read by GPU
3109  *              (CPU+RENDER, 0)
3110  *              flush_domains = CPU
3111  *              invalidate_domains = RENDER
3112  *              clflush (obj)
3113  *              MI_FLUSH
3114  *              drm_agp_chipset_flush
3115  *      4. Updated (written) by CPU again
3116  *              (CPU, CPU)
3117  *              flush_domains = 0 (no previous write domain)
3118  *              invalidate_domains = 0 (no new read domains)
3119  *      5. Read by GPU
3120  *              (CPU+RENDER, 0)
3121  *              flush_domains = CPU
3122  *              invalidate_domains = RENDER
3123  *              clflush (obj)
3124  *              MI_FLUSH
3125  *              drm_agp_chipset_flush
3126  */
3127 static void
3128 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
3129 {
3130         struct drm_device               *dev = obj->dev;
3131         struct drm_i915_private         *dev_priv = dev->dev_private;
3132         struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
3133         uint32_t                        invalidate_domains = 0;
3134         uint32_t                        flush_domains = 0;
3135         uint32_t                        old_read_domains;
3136
3137         intel_mark_busy(dev, obj);
3138
3139         /*
3140          * If the object isn't moving to a new write domain,
3141          * let the object stay in multiple read domains
3142          */
3143         if (obj->pending_write_domain == 0)
3144                 obj->pending_read_domains |= obj->read_domains;
3145         else
3146                 obj_priv->dirty = 1;
3147
3148         /*
3149          * Flush the current write domain if
3150          * the new read domains don't match. Invalidate
3151          * any read domains which differ from the old
3152          * write domain
3153          */
3154         if (obj->write_domain &&
3155             obj->write_domain != obj->pending_read_domains) {
3156                 flush_domains |= obj->write_domain;
3157                 invalidate_domains |=
3158                         obj->pending_read_domains & ~obj->write_domain;
3159         }
3160         /*
3161          * Invalidate any read caches which may have
3162          * stale data. That is, any new read domains.
3163          */
3164         invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
3165         if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
3166                 i915_gem_clflush_object(obj);
3167
3168         old_read_domains = obj->read_domains;
3169
3170         /* The actual obj->write_domain will be updated with
3171          * pending_write_domain after we emit the accumulated flush for all
3172          * of our domain changes in execbuffers (which clears objects'
3173          * write_domains).  So if we have a current write domain that we
3174          * aren't changing, set pending_write_domain to that.
3175          */
3176         if (flush_domains == 0 && obj->pending_write_domain == 0)
3177                 obj->pending_write_domain = obj->write_domain;
3178         obj->read_domains = obj->pending_read_domains;
3179
3180         dev->invalidate_domains |= invalidate_domains;
3181         dev->flush_domains |= flush_domains;
3182         if (obj_priv->ring)
3183                 dev_priv->mm.flush_rings |= obj_priv->ring->id;
3184
3185         trace_i915_gem_object_change_domain(obj,
3186                                             old_read_domains,
3187                                             obj->write_domain);
3188 }
3189
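/*
 * Illustrative sketch of "Case 1" above: roughly how the execbuffer path
 * drives the pending-domain bookkeeping for a batch buffer the GPU will only
 * read.  The fields and the i915_gem_flush() call match the ones used later
 * in i915_gem_do_execbuffer(); the wrapper itself is hypothetical.
 */
static void example_mark_batch_for_gpu(struct drm_device *dev,
                                       struct drm_file *file_priv,
                                       struct drm_gem_object *batch)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* (CPU, CPU) -> (CPU+COMMAND, 0): read-only use by the GPU. */
        batch->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
        batch->pending_write_domain = 0;

        dev->invalidate_domains = 0;
        dev->flush_domains = 0;
        dev_priv->mm.flush_rings = 0;

        /* Accumulates flush_domains = CPU and invalidate_domains = COMMAND. */
        i915_gem_object_set_to_gpu_domain(batch);

        if (dev->invalidate_domains | dev->flush_domains)
                i915_gem_flush(dev, file_priv,
                               dev->invalidate_domains,
                               dev->flush_domains,
                               dev_priv->mm.flush_rings);
}
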
3190 /**
3191  * Moves the object from a partial CPU read domain to the full one.
3192  *
3193  * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3194  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3195  */
3196 static void
3197 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3198 {
3199         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3200
3201         if (!obj_priv->page_cpu_valid)
3202                 return;
3203
3204         /* If we're partially in the CPU read domain, finish moving it in.
3205          */
3206         if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
3207                 int i;
3208
3209                 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
3210                         if (obj_priv->page_cpu_valid[i])
3211                                 continue;
3212                         drm_clflush_pages(obj_priv->pages + i, 1);
3213                 }
3214         }
3215
3216         /* Free the page_cpu_valid mappings which are now stale, whether
3217          * or not we've got I915_GEM_DOMAIN_CPU.
3218          */
3219         kfree(obj_priv->page_cpu_valid);
3220         obj_priv->page_cpu_valid = NULL;
3221 }
3222
3223 /**
3224  * Set the CPU read domain on a range of the object.
3225  *
3226  * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3227  * not entirely valid.  The object's page_cpu_valid member tracks which
3228  * pages have been flushed, and will be respected by
3229  * i915_gem_object_set_to_cpu_domain() if it is later called to get a valid
3230  * mapping of the whole object.
3231  *
3232  * This function returns when the move is complete, including waiting on
3233  * flushes to occur.
3234  */
3235 static int
3236 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3237                                           uint64_t offset, uint64_t size)
3238 {
3239         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3240         uint32_t old_read_domains;
3241         int i, ret;
3242
3243         if (offset == 0 && size == obj->size)
3244                 return i915_gem_object_set_to_cpu_domain(obj, 0);
3245
3246         ret = i915_gem_object_flush_gpu_write_domain(obj, false);
3247         if (ret != 0)
3248                 return ret;
3249         i915_gem_object_flush_gtt_write_domain(obj);
3250
3251         /* If we're already fully in the CPU read domain, we're done. */
3252         if (obj_priv->page_cpu_valid == NULL &&
3253             (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
3254                 return 0;
3255
3256         /* Otherwise, create/clear the per-page CPU read domain flag if we're
3257          * newly adding I915_GEM_DOMAIN_CPU
3258          */
3259         if (obj_priv->page_cpu_valid == NULL) {
3260                 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
3261                                                    GFP_KERNEL);
3262                 if (obj_priv->page_cpu_valid == NULL)
3263                         return -ENOMEM;
3264         } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
3265                 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
3266
3267         /* Flush the cache on any pages that are still invalid from the CPU's
3268          * perspective.
3269          */
3270         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3271              i++) {
3272                 if (obj_priv->page_cpu_valid[i])
3273                         continue;
3274
3275                 drm_clflush_pages(obj_priv->pages + i, 1);
3276
3277                 obj_priv->page_cpu_valid[i] = 1;
3278         }
3279
3280         /* It should now be out of any other write domains, and we can update
3281          * the domain values for our changes.
3282          */
3283         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3284
3285         old_read_domains = obj->read_domains;
3286         obj->read_domains |= I915_GEM_DOMAIN_CPU;
3287
3288         trace_i915_gem_object_change_domain(obj,
3289                                             old_read_domains,
3290                                             obj->write_domain);
3291
3292         return 0;
3293 }
3294
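/*
 * Illustrative sketch of the per-page tracking above: a hypothetical caller
 * that first makes only the first page CPU-readable and later promotes the
 * whole object.  Assumes struct_mutex is held and the object has backing
 * pages; example_partial_then_full is not a real driver function.
 */
static int example_partial_then_full(struct drm_gem_object *obj)
{
        int ret;

        /* Clflushes just the first page and allocates page_cpu_valid. */
        ret = i915_gem_object_set_cpu_read_domain_range(obj, 0, PAGE_SIZE);
        if (ret)
                return ret;

        /* A later full move flushes the remaining pages and frees the
         * page_cpu_valid map via set_to_full_cpu_read_domain().
         */
        return i915_gem_object_set_to_cpu_domain(obj, 0);
}
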
3295 /**
3296  * Pin an object to the GTT and evaluate the relocations landing in it.
3297  */
3298 static int
3299 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3300                                  struct drm_file *file_priv,
3301                                  struct drm_i915_gem_exec_object2 *entry)
3302 {
3303         struct drm_device *dev = obj->dev;
3304         drm_i915_private_t *dev_priv = dev->dev_private;
3305         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3306         struct drm_i915_gem_relocation_entry __user *user_relocs;
3307         int i, ret;
3308         bool need_fence;
3309
3310         need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3311                      obj_priv->tiling_mode != I915_TILING_NONE;
3312
3313         /* Check fence reg constraints and rebind if necessary */
3314         if (need_fence &&
3315             !i915_gem_object_fence_offset_ok(obj,
3316                                              obj_priv->tiling_mode)) {
3317                 ret = i915_gem_object_unbind(obj);
3318                 if (ret)
3319                         return ret;
3320         }
3321
3322         /* Choose the GTT offset for our buffer and put it there. */
3323         ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
3324         if (ret)
3325                 return ret;
3326
3327         /*
3328          * Pre-965 chips need a fence register set up in order to
3329          * properly handle blits to/from tiled surfaces.
3330          */
3331         if (need_fence) {
3332                 ret = i915_gem_object_get_fence_reg(obj, true);
3333                 if (ret != 0) {
3334                         i915_gem_object_unpin(obj);
3335                         return ret;
3336                 }
3337
3338                 dev_priv->fence_regs[obj_priv->fence_reg].gpu = true;
3339         }
3340
3341         entry->offset = obj_priv->gtt_offset;
3342
3343         /* Apply the relocations, using the GTT aperture to avoid cache
3344          * flushing requirements.
3345          */
3346         user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
3347         for (i = 0; i < entry->relocation_count; i++) {
3348                 struct drm_i915_gem_relocation_entry reloc;
3349                 struct drm_gem_object *target_obj;
3350                 struct drm_i915_gem_object *target_obj_priv;
3351
3352                 ret = __copy_from_user_inatomic(&reloc,
3353                                                 user_relocs+i,
3354                                                 sizeof(reloc));
3355                 if (ret) {
3356                         i915_gem_object_unpin(obj);
3357                         return -EFAULT;
3358                 }
3359
3360                 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
3361                                                    reloc.target_handle);
3362                 if (target_obj == NULL) {
3363                         i915_gem_object_unpin(obj);
3364                         return -ENOENT;
3365                 }
3366                 target_obj_priv = to_intel_bo(target_obj);
3367
3368 #if WATCH_RELOC
3369                 DRM_INFO("%s: obj %p offset %08x target %d "
3370                          "read %08x write %08x gtt %08x "
3371                          "presumed %08x delta %08x\n",
3372                          __func__,
3373                          obj,
3374                          (int) reloc.offset,
3375                          (int) reloc.target_handle,
3376                          (int) reloc.read_domains,
3377                          (int) reloc.write_domain,
3378                          (int) target_obj_priv->gtt_offset,
3379                          (int) reloc.presumed_offset,
3380                          reloc.delta);
3381 #endif
3382
3383                 /* The target buffer should have appeared before us in the
3384                  * exec_object list, so it should have a GTT space bound by now.
3385                  */
3386                 if (target_obj_priv->gtt_space == NULL) {
3387                         DRM_ERROR("No GTT space found for object %d\n",
3388                                   reloc.target_handle);
3389                         drm_gem_object_unreference(target_obj);
3390                         i915_gem_object_unpin(obj);
3391                         return -EINVAL;
3392                 }
3393
3394                 /* Validate that the target is in a valid r/w GPU domain */
3395                 if (reloc.write_domain & (reloc.write_domain - 1)) {
3396                         DRM_ERROR("reloc with multiple write domains: "
3397                                   "obj %p target %d offset %d "
3398                                   "read %08x write %08x",
3399                                   obj, reloc.target_handle,
3400                                   (int) reloc.offset,
3401                                   reloc.read_domains,
3402                                   reloc.write_domain);
3403                         drm_gem_object_unreference(target_obj);
3404                         i915_gem_object_unpin(obj);
3405                         return -EINVAL;
3406                 }
3407                 if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
3408                     reloc.read_domains & I915_GEM_DOMAIN_CPU) {
3409                         DRM_ERROR("reloc with read/write CPU domains: "
3410                                   "obj %p target %d offset %d "
3411                                   "read %08x write %08x",
3412                                   obj, reloc.target_handle,
3413                                   (int) reloc.offset,
3414                                   reloc.read_domains,
3415                                   reloc.write_domain);
3416                         drm_gem_object_unreference(target_obj);
3417                         i915_gem_object_unpin(obj);
3418                         return -EINVAL;
3419                 }
3420                 if (reloc.write_domain && target_obj->pending_write_domain &&
3421                     reloc.write_domain != target_obj->pending_write_domain) {
3422                         DRM_ERROR("Write domain conflict: "
3423                                   "obj %p target %d offset %d "
3424                                   "new %08x old %08x\n",
3425                                   obj, reloc.target_handle,
3426                                   (int) reloc.offset,
3427                                   reloc.write_domain,
3428                                   target_obj->pending_write_domain);
3429                         drm_gem_object_unreference(target_obj);
3430                         i915_gem_object_unpin(obj);
3431                         return -EINVAL;
3432                 }
3433
3434                 target_obj->pending_read_domains |= reloc.read_domains;
3435                 target_obj->pending_write_domain |= reloc.write_domain;
3436
3437                 /* If the relocation already has the right value in it, no
3438                  * more work needs to be done.
3439                  */
3440                 if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
3441                         drm_gem_object_unreference(target_obj);
3442                         continue;
3443                 }
3444
3445                 /* Check that the relocation address is valid... */
3446                 if (reloc.offset > obj->size - 4) {
3447                         DRM_ERROR("Relocation beyond object bounds: "
3448                                   "obj %p target %d offset %d size %d.\n",
3449                                   obj, reloc.target_handle,
3450                                   (int) reloc.offset, (int) obj->size);
3451                         drm_gem_object_unreference(target_obj);
3452                         i915_gem_object_unpin(obj);
3453                         return -EINVAL;
3454                 }
3455                 if (reloc.offset & 3) {
3456                         DRM_ERROR("Relocation not 4-byte aligned: "
3457                                   "obj %p target %d offset %d.\n",
3458                                   obj, reloc.target_handle,
3459                                   (int) reloc.offset);
3460                         drm_gem_object_unreference(target_obj);
3461                         i915_gem_object_unpin(obj);
3462                         return -EINVAL;
3463                 }
3464
3465                 /* and points to somewhere within the target object. */
3466                 if (reloc.delta >= target_obj->size) {
3467                         DRM_ERROR("Relocation beyond target object bounds: "
3468                                   "obj %p target %d delta %d size %d.\n",
3469                                   obj, reloc.target_handle,
3470                                   (int) reloc.delta, (int) target_obj->size);
3471                         drm_gem_object_unreference(target_obj);
3472                         i915_gem_object_unpin(obj);
3473                         return -EINVAL;
3474                 }
3475
3476                 reloc.delta += target_obj_priv->gtt_offset;
3477                 if (obj->write_domain == I915_GEM_DOMAIN_CPU) {
3478                         uint32_t page_offset = reloc.offset & ~PAGE_MASK;
3479                         char *vaddr;
3480
3481                         vaddr = kmap_atomic(obj_priv->pages[reloc.offset >> PAGE_SHIFT], KM_USER0);
3482                         *(uint32_t *)(vaddr + page_offset) = reloc.delta;
3483                         kunmap_atomic(vaddr, KM_USER0);
3484                 } else {
3485                         uint32_t __iomem *reloc_entry;
3486                         void __iomem *reloc_page;
3487                         int ret;
3488
3489                         ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3490                         if (ret) {
3491                                 drm_gem_object_unreference(target_obj);
3492                                 i915_gem_object_unpin(obj);
3493                                 return ret;
3494                         }
3495
3496                         /* Map the page containing the relocation we're going to perform.  */
3497                         reloc.offset += obj_priv->gtt_offset;
3498                         reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3499                                                               reloc.offset & PAGE_MASK,
3500                                                               KM_USER0);
3501                         reloc_entry = (uint32_t __iomem *)
3502                                 (reloc_page + (reloc.offset & ~PAGE_MASK));
3503                         iowrite32(reloc.delta, reloc_entry);
3504                         io_mapping_unmap_atomic(reloc_page, KM_USER0);
3505                 }
3506
3507                 drm_gem_object_unreference(target_obj);
3508         }
3509
3510         return 0;
3511 }
3512
3513 /* Throttle our rendering by waiting until the ring has completed our requests
3514  * emitted over 20 msec ago.
3515  *
3516  * Note that if we were to use the current jiffies each time around the loop,
3517  * we wouldn't escape the function with any frames outstanding if the time to
3518  * render a frame was over 20ms.
3519  *
3520  * This should get us reasonable parallelism between CPU and GPU but also
3521  * relatively low latency when blocking on a particular request to finish.
3522  */
3523 static int
3524 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3525 {
3526         struct drm_i915_private *dev_priv = dev->dev_private;
3527         struct drm_i915_file_private *file_priv = file->driver_priv;
3528         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3529         struct drm_i915_gem_request *request;
3530         struct intel_ring_buffer *ring = NULL;
3531         u32 seqno = 0;
3532         int ret;
3533
3534         spin_lock(&file_priv->mm.lock);
3535         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3536                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3537                         break;
3538
3539                 ring = request->ring;
3540                 seqno = request->seqno;
3541         }
3542         spin_unlock(&file_priv->mm.lock);
3543
3544         if (seqno == 0)
3545                 return 0;
3546
3547         ret = 0;
3548         if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
3549                 /* And wait for the seqno to pass without holding any locks and
3550                  * causing extra latency for others. This is safe as the irq
3551                  * generation is designed to be run atomically and so is
3552                  * lockless.
3553                  */
3554                 ring->user_irq_get(dev, ring);
3555                 ret = wait_event_interruptible(ring->irq_queue,
3556                                                i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
3557                                                || atomic_read(&dev_priv->mm.wedged));
3558                 ring->user_irq_put(dev, ring);
3559
3560                 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3561                         ret = -EIO;
3562         }
3563
3564         if (ret == 0)
3565                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3566
3567         return ret;
3568 }
3569
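/*
 * Illustrative sketch of the throttle window used above, pulled out for
 * clarity: a request counts as "recent" when it was emitted within the last
 * 20 msec relative to the jiffies snapshot taken once at the top of
 * i915_gem_ring_throttle().  The helper name is made up for the example.
 */
static bool example_request_is_recent(struct drm_i915_gem_request *request,
                                      unsigned long recent_enough)
{
        /* recent_enough == jiffies - msecs_to_jiffies(20) at snapshot time. */
        return time_after_eq(request->emitted_jiffies, recent_enough);
}
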
3570 static int
3571 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
3572                           uint64_t exec_offset)
3573 {
3574         uint32_t exec_start, exec_len;
3575
3576         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3577         exec_len = (uint32_t) exec->batch_len;
3578
3579         if ((exec_start | exec_len) & 0x7)
3580                 return -EINVAL;
3581
3582         if (!exec_start)
3583                 return -EINVAL;
3584
3585         return 0;
3586 }
3587
3588 static int
3589 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
3590                    int count)
3591 {
3592         int i;
3593
3594         for (i = 0; i < count; i++) {
3595                 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
3596                 size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
3597
3598                 if (!access_ok(VERIFY_READ, ptr, length))
3599                         return -EFAULT;
3600
3601                 if (fault_in_pages_readable(ptr, length))
3602                         return -EFAULT;
3603         }
3604
3605         return 0;
3606 }
3607
3608 static int
3609 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3610                        struct drm_file *file_priv,
3611                        struct drm_i915_gem_execbuffer2 *args,
3612                        struct drm_i915_gem_exec_object2 *exec_list)
3613 {
3614         drm_i915_private_t *dev_priv = dev->dev_private;
3615         struct drm_gem_object **object_list = NULL;
3616         struct drm_gem_object *batch_obj;
3617         struct drm_i915_gem_object *obj_priv;
3618         struct drm_clip_rect *cliprects = NULL;
3619         struct drm_i915_gem_request *request = NULL;
3620         int ret, i, pinned = 0;
3621         uint64_t exec_offset;
3622         int pin_tries, flips;
3623
3624         struct intel_ring_buffer *ring = NULL;
3625
3626         ret = i915_gem_check_is_wedged(dev);
3627         if (ret)
3628                 return ret;
3629
3630         ret = validate_exec_list(exec_list, args->buffer_count);
3631         if (ret)
3632                 return ret;
3633
3634 #if WATCH_EXEC
3635         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3636                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3637 #endif
3638         if (args->flags & I915_EXEC_BSD) {
3639                 if (!HAS_BSD(dev)) {
3640                         DRM_ERROR("execbuf with wrong flag\n");
3641                         return -EINVAL;
3642                 }
3643                 ring = &dev_priv->bsd_ring;
3644         } else {
3645                 ring = &dev_priv->render_ring;
3646         }
3647
3648         if (args->buffer_count < 1) {
3649                 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3650                 return -EINVAL;
3651         }
3652         object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
3653         if (object_list == NULL) {
3654                 DRM_ERROR("Failed to allocate object list for %d buffers\n",
3655                           args->buffer_count);
3656                 ret = -ENOMEM;
3657                 goto pre_mutex_err;
3658         }
3659
3660         if (args->num_cliprects != 0) {
3661                 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3662                                     GFP_KERNEL);
3663                 if (cliprects == NULL) {
3664                         ret = -ENOMEM;
3665                         goto pre_mutex_err;
3666                 }
3667
3668                 ret = copy_from_user(cliprects,
3669                                      (struct drm_clip_rect __user *)
3670                                      (uintptr_t) args->cliprects_ptr,
3671                                      sizeof(*cliprects) * args->num_cliprects);
3672                 if (ret != 0) {
3673                         DRM_ERROR("copy %d cliprects failed: %d\n",
3674                                   args->num_cliprects, ret);
3675                         ret = -EFAULT;
3676                         goto pre_mutex_err;
3677                 }
3678         }
3679
3680         request = kzalloc(sizeof(*request), GFP_KERNEL);
3681         if (request == NULL) {
3682                 ret = -ENOMEM;
3683                 goto pre_mutex_err;
3684         }
3685
3686         ret = i915_mutex_lock_interruptible(dev);
3687         if (ret)
3688                 goto pre_mutex_err;
3689
3690         if (dev_priv->mm.suspended) {
3691                 mutex_unlock(&dev->struct_mutex);
3692                 ret = -EBUSY;
3693                 goto pre_mutex_err;
3694         }
3695
3696         /* Look up object handles */
3697         for (i = 0; i < args->buffer_count; i++) {
3698                 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3699                                                        exec_list[i].handle);
3700                 if (object_list[i] == NULL) {
3701                         DRM_ERROR("Invalid object handle %d at index %d\n",
3702                                    exec_list[i].handle, i);
3703                         /* prevent error path from reading uninitialized data */
3704                         args->buffer_count = i + 1;
3705                         ret = -ENOENT;
3706                         goto err;
3707                 }
3708
3709                 obj_priv = to_intel_bo(object_list[i]);
3710                 if (obj_priv->in_execbuffer) {
3711                         DRM_ERROR("Object %p appears more than once in object list\n",
3712                                    object_list[i]);
3713                         /* prevent error path from reading uninitialized data */
3714                         args->buffer_count = i + 1;
3715                         ret = -EINVAL;
3716                         goto err;
3717                 }
3718                 obj_priv->in_execbuffer = true;
3719         }
3720
3721         /* Pin and relocate */
3722         for (pin_tries = 0; ; pin_tries++) {
3723                 ret = 0;
3724
3725                 for (i = 0; i < args->buffer_count; i++) {
3726                         object_list[i]->pending_read_domains = 0;
3727                         object_list[i]->pending_write_domain = 0;
3728                         ret = i915_gem_object_pin_and_relocate(object_list[i],
3729                                                                file_priv,
3730                                                                &exec_list[i]);
3731                         if (ret)
3732                                 break;
3733                         pinned = i + 1;
3734                 }
3735                 /* success */
3736                 if (ret == 0)
3737                         break;
3738
3739                 /* error other than GTT full, or we've already tried again */
3740                 if (ret != -ENOSPC || pin_tries >= 1) {
3741                         if (ret != -ERESTARTSYS) {
3742                                 unsigned long long total_size = 0;
3743                                 int num_fences = 0;
3744                                 for (i = 0; i < args->buffer_count; i++) {
3745                                         obj_priv = to_intel_bo(object_list[i]);
3746
3747                                         total_size += object_list[i]->size;
3748                                         num_fences +=
3749                                                 exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE &&
3750                                                 obj_priv->tiling_mode != I915_TILING_NONE;
3751                                 }
3752                                 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n",
3753                                           pinned+1, args->buffer_count,
3754                                           total_size, num_fences,
3755                                           ret);
3756                                 DRM_ERROR("%u objects [%u pinned, %u GTT], "
3757                                           "%zu object bytes [%zu pinned], "
3758                                           "%zu /%zu gtt bytes\n",
3759                                           dev_priv->mm.object_count,
3760                                           dev_priv->mm.pin_count,
3761                                           dev_priv->mm.gtt_count,
3762                                           dev_priv->mm.object_memory,
3763                                           dev_priv->mm.pin_memory,
3764                                           dev_priv->mm.gtt_memory,
3765                                           dev_priv->mm.gtt_total);
3766                         }
3767                         goto err;
3768                 }
3769
3770                 /* unpin all of our buffers */
3771                 for (i = 0; i < pinned; i++)
3772                         i915_gem_object_unpin(object_list[i]);
3773                 pinned = 0;
3774
3775                 /* evict everyone we can from the aperture */
3776                 ret = i915_gem_evict_everything(dev);
3777                 if (ret && ret != -ENOSPC)
3778                         goto err;
3779         }
3780
3781         /* Set the pending read domains for the batch buffer to COMMAND */
3782         batch_obj = object_list[args->buffer_count-1];
3783         if (batch_obj->pending_write_domain) {
3784                 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3785                 ret = -EINVAL;
3786                 goto err;
3787         }
3788         batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
3789
3790         /* Sanity check the batch buffer, prior to moving objects */
3791         exec_offset = exec_list[args->buffer_count - 1].offset;
3792         ret = i915_gem_check_execbuffer(args, exec_offset);
3793         if (ret != 0) {
3794                 DRM_ERROR("execbuf with invalid offset/length\n");
3795                 goto err;
3796         }
3797
3798         /* Zero the global flush/invalidate flags. These
3799          * will be modified as new domains are computed
3800          * for each object
3801          */
3802         dev->invalidate_domains = 0;
3803         dev->flush_domains = 0;
3804         dev_priv->mm.flush_rings = 0;
3805
3806         for (i = 0; i < args->buffer_count; i++) {
3807                 struct drm_gem_object *obj = object_list[i];
3808
3809                 /* Compute new gpu domains and update invalidate/flush */
3810                 i915_gem_object_set_to_gpu_domain(obj);
3811         }
3812
3813         if (dev->invalidate_domains | dev->flush_domains) {
3814 #if WATCH_EXEC
3815                 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3816                           __func__,
3817                          dev->invalidate_domains,
3818                          dev->flush_domains);
3819 #endif
3820                 i915_gem_flush(dev, file_priv,
3821                                dev->invalidate_domains,
3822                                dev->flush_domains,
3823                                dev_priv->mm.flush_rings);
3824         }
3825
3826         for (i = 0; i < args->buffer_count; i++) {
3827                 struct drm_gem_object *obj = object_list[i];
3828                 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3829                 uint32_t old_write_domain = obj->write_domain;
3830
3831                 obj->write_domain = obj->pending_write_domain;
3832                 if (obj->write_domain)
3833                         list_move_tail(&obj_priv->gpu_write_list,
3834                                        &dev_priv->mm.gpu_write_list);
3835
3836                 trace_i915_gem_object_change_domain(obj,
3837                                                     obj->read_domains,
3838                                                     old_write_domain);
3839         }
3840
3841 #if WATCH_COHERENCY
3842         for (i = 0; i < args->buffer_count; i++) {
3843                 i915_gem_object_check_coherency(object_list[i],
3844                                                 exec_list[i].handle);
3845         }
3846 #endif
3847
3848 #if WATCH_EXEC
3849         i915_gem_dump_object(batch_obj,
3850                               args->batch_len,
3851                               __func__,
3852                               ~0);
3853 #endif
3854
3855         /* Check for any pending flips. As we only maintain a flip queue depth
3856          * of 1, we can simply insert a WAIT for the next display flip prior
3857          * to executing the batch and avoid stalling the CPU.
3858          */
3859         flips = 0;
3860         for (i = 0; i < args->buffer_count; i++) {
3861                 if (object_list[i]->write_domain)
3862                         flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
3863         }
3864         if (flips) {
3865                 int plane, flip_mask;
3866
3867                 for (plane = 0; flips >> plane; plane++) {
3868                         if (((flips >> plane) & 1) == 0)
3869                                 continue;
3870
3871                         if (plane)
3872                                 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
3873                         else
3874                                 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
3875
3876                         intel_ring_begin(dev, ring, 2);
3877                         intel_ring_emit(dev, ring,
3878                                         MI_WAIT_FOR_EVENT | flip_mask);
3879                         intel_ring_emit(dev, ring, MI_NOOP);
3880                         intel_ring_advance(dev, ring);
3881                 }
3882         }
3883
3884         /* Exec the batchbuffer */
3885         ret = ring->dispatch_gem_execbuffer(dev, ring, args,
3886                                             cliprects, exec_offset);
3887         if (ret) {
3888                 DRM_ERROR("dispatch failed %d\n", ret);
3889                 goto err;
3890         }
3891
3892         /*
3893          * Ensure that the commands in the batch buffer are
3894          * finished before the interrupt fires
3895          */
3896         i915_retire_commands(dev, ring);
3897
3898         for (i = 0; i < args->buffer_count; i++) {
3899                 struct drm_gem_object *obj = object_list[i];
3900                 obj_priv = to_intel_bo(obj);
3901
3902                 i915_gem_object_move_to_active(obj, ring);
3903         }
3904
3905         i915_add_request(dev, file_priv, request, ring);
3906         request = NULL;
3907
3908 err:
3909         for (i = 0; i < pinned; i++)
3910                 i915_gem_object_unpin(object_list[i]);
3911
3912         for (i = 0; i < args->buffer_count; i++) {
3913                 if (object_list[i]) {
3914                         obj_priv = to_intel_bo(object_list[i]);
3915                         obj_priv->in_execbuffer = false;
3916                 }
3917                 drm_gem_object_unreference(object_list[i]);
3918         }
3919
3920         mutex_unlock(&dev->struct_mutex);
3921
3922 pre_mutex_err:
3923         drm_free_large(object_list);
3924         kfree(cliprects);
3925         kfree(request);
3926
3927         return ret;
3928 }
3929
3930 /*
3931  * Legacy execbuffer just creates an exec2 list from the original exec object
3932  * list array and passes it to the real function.
3933  */
3934 int
3935 i915_gem_execbuffer(struct drm_device *dev, void *data,
3936                     struct drm_file *file_priv)
3937 {
3938         struct drm_i915_gem_execbuffer *args = data;
3939         struct drm_i915_gem_execbuffer2 exec2;
3940         struct drm_i915_gem_exec_object *exec_list = NULL;
3941         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
3942         int ret, i;
3943
3944 #if WATCH_EXEC
3945         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3946                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3947 #endif
3948
3949         if (args->buffer_count < 1) {
3950                 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3951                 return -EINVAL;
3952         }
3953
3954         /* Copy in the exec list from userland */
3955         exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
3956         exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
3957         if (exec_list == NULL || exec2_list == NULL) {
3958                 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
3959                           args->buffer_count);
3960                 drm_free_large(exec_list);
3961                 drm_free_large(exec2_list);
3962                 return -ENOMEM;
3963         }
3964         ret = copy_from_user(exec_list,
3965                              (struct drm_i915_relocation_entry __user *)
3966                              (uintptr_t) args->buffers_ptr,
3967                              sizeof(*exec_list) * args->buffer_count);
3968         if (ret != 0) {
3969                 DRM_ERROR("copy %d exec entries failed %d\n",
3970                           args->buffer_count, ret);
3971                 drm_free_large(exec_list);
3972                 drm_free_large(exec2_list);
3973                 return -EFAULT;
3974         }
3975
3976         for (i = 0; i < args->buffer_count; i++) {
3977                 exec2_list[i].handle = exec_list[i].handle;
3978                 exec2_list[i].relocation_count = exec_list[i].relocation_count;
3979                 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
3980                 exec2_list[i].alignment = exec_list[i].alignment;
3981                 exec2_list[i].offset = exec_list[i].offset;
3982                 if (INTEL_INFO(dev)->gen < 4)
3983                         exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
3984                 else
3985                         exec2_list[i].flags = 0;
3986         }
3987
3988         exec2.buffers_ptr = args->buffers_ptr;
3989         exec2.buffer_count = args->buffer_count;
3990         exec2.batch_start_offset = args->batch_start_offset;
3991         exec2.batch_len = args->batch_len;
3992         exec2.DR1 = args->DR1;
3993         exec2.DR4 = args->DR4;
3994         exec2.num_cliprects = args->num_cliprects;
3995         exec2.cliprects_ptr = args->cliprects_ptr;
3996         exec2.flags = I915_EXEC_RENDER;
3997
3998         ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
3999         if (!ret) {
4000                 /* Copy the new buffer offsets back to the user's exec list. */
4001                 for (i = 0; i < args->buffer_count; i++)
4002                         exec_list[i].offset = exec2_list[i].offset;
4003                 /* ... and back out to userspace */
4004                 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4005                                    (uintptr_t) args->buffers_ptr,
4006                                    exec_list,
4007                                    sizeof(*exec_list) * args->buffer_count);
4008                 if (ret) {
4009                         ret = -EFAULT;
4010                         DRM_ERROR("failed to copy %d exec entries "
4011                                   "back to user (%d)\n",
4012                                   args->buffer_count, ret);
4013                 }
4014         }
4015
4016         drm_free_large(exec_list);
4017         drm_free_large(exec2_list);
4018         return ret;
4019 }
4020
4021 int
4022 i915_gem_execbuffer2(struct drm_device *dev, void *data,
4023                      struct drm_file *file_priv)
4024 {
4025         struct drm_i915_gem_execbuffer2 *args = data;
4026         struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4027         int ret;
4028
4029 #if WATCH_EXEC
4030         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4031                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4032 #endif
4033
4034         if (args->buffer_count < 1) {
4035                 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
4036                 return -EINVAL;
4037         }
4038
4039         exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4040         if (exec2_list == NULL) {
4041                 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4042                           args->buffer_count);
4043                 return -ENOMEM;
4044         }
4045         ret = copy_from_user(exec2_list,
4046                              (struct drm_i915_relocation_entry __user *)
4047                              (uintptr_t) args->buffers_ptr,
4048                              sizeof(*exec2_list) * args->buffer_count);
4049         if (ret != 0) {
4050                 DRM_ERROR("copy %d exec entries failed %d\n",
4051                           args->buffer_count, ret);
4052                 drm_free_large(exec2_list);
4053                 return -EFAULT;
4054         }
4055
4056         ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4057         if (!ret) {
4058                 /* Copy the new buffer offsets back to the user's exec list. */
4059                 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4060                                    (uintptr_t) args->buffers_ptr,
4061                                    exec2_list,
4062                                    sizeof(*exec2_list) * args->buffer_count);
4063                 if (ret) {
4064                         ret = -EFAULT;
4065                         DRM_ERROR("failed to copy %d exec entries "
4066                                   "back to user (%d)\n",
4067                                   args->buffer_count, ret);
4068                 }
4069         }
4070
4071         drm_free_large(exec2_list);
4072         return ret;
4073 }
4074
4075 int
4076 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4077 {
4078         struct drm_device *dev = obj->dev;
4079         struct drm_i915_private *dev_priv = dev->dev_private;
4080         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4081         int ret;
4082
4083         BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
4084         WARN_ON(i915_verify_lists(dev));
4085
4086         if (obj_priv->gtt_space != NULL) {
4087                 if (alignment == 0)
4088                         alignment = i915_gem_get_gtt_alignment(obj);
4089                 if (obj_priv->gtt_offset & (alignment - 1)) {
4090                         WARN(obj_priv->pin_count,
4091                              "bo is already pinned with incorrect alignment:"
4092                              " offset=%x, req.alignment=%x\n",
4093                              obj_priv->gtt_offset, alignment);
4094                         ret = i915_gem_object_unbind(obj);
4095                         if (ret)
4096                                 return ret;
4097                 }
4098         }
4099
4100         if (obj_priv->gtt_space == NULL) {
4101                 ret = i915_gem_object_bind_to_gtt(obj, alignment);
4102                 if (ret)
4103                         return ret;
4104         }
4105
4106         obj_priv->pin_count++;
4107
4108         /* If the object is not active and not pending a flush,
4109          * remove it from the inactive list
4110          */
4111         if (obj_priv->pin_count == 1) {
4112                 i915_gem_info_add_pin(dev_priv, obj->size);
4113                 if (!obj_priv->active)
4114                         list_move_tail(&obj_priv->list,
4115                                        &dev_priv->mm.pinned_list);
4116         }
4117
4118         WARN_ON(i915_verify_lists(dev));
4119         return 0;
4120 }
4121
4122 void
4123 i915_gem_object_unpin(struct drm_gem_object *obj)
4124 {
4125         struct drm_device *dev = obj->dev;
4126         drm_i915_private_t *dev_priv = dev->dev_private;
4127         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4128
4129         WARN_ON(i915_verify_lists(dev));
4130         obj_priv->pin_count--;
4131         BUG_ON(obj_priv->pin_count < 0);
4132         BUG_ON(obj_priv->gtt_space == NULL);
4133
4134         /* If the object is no longer pinned, and is
4135          * neither active nor being flushed, then stick it on
4136          * the inactive list
4137          */
4138         if (obj_priv->pin_count == 0) {
4139                 if (!obj_priv->active)
4140                         list_move_tail(&obj_priv->list,
4141                                        &dev_priv->mm.inactive_list);
4142                 i915_gem_info_remove_pin(dev_priv, obj->size);
4143         }
4144         WARN_ON(i915_verify_lists(dev));
4145 }
4146
4147 int
4148 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4149                    struct drm_file *file_priv)
4150 {
4151         struct drm_i915_gem_pin *args = data;
4152         struct drm_gem_object *obj;
4153         struct drm_i915_gem_object *obj_priv;
4154         int ret;
4155
4156         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4157         if (obj == NULL) {
4158                 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
4159                           args->handle);
4160                 return -ENOENT;
4161         }
4162         obj_priv = to_intel_bo(obj);
4163
4164         ret = i915_mutex_lock_interruptible(dev);
4165         if (ret) {
4166                 drm_gem_object_unreference_unlocked(obj);
4167                 return ret;
4168         }
4169
4170         if (obj_priv->madv != I915_MADV_WILLNEED) {
4171                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
4172                 drm_gem_object_unreference(obj);
4173                 mutex_unlock(&dev->struct_mutex);
4174                 return -EINVAL;
4175         }
4176
4177         if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
4178                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
4179                           args->handle);
4180                 drm_gem_object_unreference(obj);
4181                 mutex_unlock(&dev->struct_mutex);
4182                 return -EINVAL;
4183         }
4184
4185         obj_priv->user_pin_count++;
4186         obj_priv->pin_filp = file_priv;
4187         if (obj_priv->user_pin_count == 1) {
4188                 ret = i915_gem_object_pin(obj, args->alignment);
4189                 if (ret != 0) {
4190                         drm_gem_object_unreference(obj);
4191                         mutex_unlock(&dev->struct_mutex);
4192                         return ret;
4193                 }
4194         }
4195
4196         /* XXX - flush the CPU caches for pinned objects
4197          * as the X server doesn't manage domains yet
4198          */
4199         i915_gem_object_flush_cpu_write_domain(obj);
4200         args->offset = obj_priv->gtt_offset;
4201         drm_gem_object_unreference(obj);
4202         mutex_unlock(&dev->struct_mutex);
4203
4204         return 0;
4205 }
4206
4207 int
4208 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4209                      struct drm_file *file_priv)
4210 {
4211         struct drm_i915_gem_pin *args = data;
4212         struct drm_gem_object *obj;
4213         struct drm_i915_gem_object *obj_priv;
4214         int ret;
4215
4216         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4217         if (obj == NULL) {
4218                 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
4219                           args->handle);
4220                 return -ENOENT;
4221         }
4222
4223         obj_priv = to_intel_bo(obj);
4224
4225         ret = i915_mutex_lock_interruptible(dev);
4226         if (ret) {
4227                 drm_gem_object_unreference_unlocked(obj);
4228                 return ret;
4229         }
4230
4231         if (obj_priv->pin_filp != file_priv) {
4232                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
4233                           args->handle);
4234                 drm_gem_object_unreference(obj);
4235                 mutex_unlock(&dev->struct_mutex);
4236                 return -EINVAL;
4237         }
4238         obj_priv->user_pin_count--;
4239         if (obj_priv->user_pin_count == 0) {
4240                 obj_priv->pin_filp = NULL;
4241                 i915_gem_object_unpin(obj);
4242         }
4243
4244         drm_gem_object_unreference(obj);
4245         mutex_unlock(&dev->struct_mutex);
4246         return 0;
4247 }
4248
4249 int
4250 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4251                     struct drm_file *file_priv)
4252 {
4253         struct drm_i915_gem_busy *args = data;
4254         struct drm_gem_object *obj;
4255         struct drm_i915_gem_object *obj_priv;
4256         int ret;
4257
4258         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4259         if (obj == NULL) {
4260                 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
4261                           args->handle);
4262                 return -ENOENT;
4263         }
4264
4265         ret = i915_mutex_lock_interruptible(dev);
4266         if (ret) {
4267                 drm_gem_object_unreference_unlocked(obj);
4268                 return ret;
4269         }
4270
4271         /* Count all active objects as busy, even if they are currently not used
4272          * by the gpu. Users of this interface expect objects to eventually
4273          * become non-busy without any further actions, therefore emit any
4274          * necessary flushes here.
4275          */
4276         obj_priv = to_intel_bo(obj);
4277         args->busy = obj_priv->active;
4278         if (args->busy) {
4279                 /* Unconditionally flush objects, even when the gpu still uses this
4280                  * object. Userspace calling this function indicates that it wants to
4281                  * use this buffer sooner rather than later, so issuing the required
4282                  * flush earlier is beneficial.
4283                  */
4284                 if (obj->write_domain & I915_GEM_GPU_DOMAINS)
4285                         i915_gem_flush_ring(dev, file_priv,
4286                                             obj_priv->ring,
4287                                             0, obj->write_domain);
4288
4289                 /* Update the active list for the hardware's current position.
4290                  * Otherwise this only updates on a delayed timer or when irqs
4291                  * are actually unmasked, and our working set ends up being
4292                  * larger than required.
4293                  */
4294                 i915_gem_retire_requests_ring(dev, obj_priv->ring);
4295
4296                 args->busy = obj_priv->active;
4297         }
4298
4299         drm_gem_object_unreference(obj);
4300         mutex_unlock(&dev->struct_mutex);
4301         return 0;
4302 }
4303
4304 int
4305 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4306                         struct drm_file *file_priv)
4307 {
4308         return i915_gem_ring_throttle(dev, file_priv);
4309 }
4310
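     /*
      * Set the purgeability hint on an object: I915_MADV_DONTNEED marks
      * its backing storage as discardable under memory pressure, while
      * I915_MADV_WILLNEED clears that hint.  Pinned objects are rejected,
      * and args->retained reports whether the backing storage is still
      * present (i.e. has not already been purged).
      */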
4311 int
4312 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4313                        struct drm_file *file_priv)
4314 {
4315         struct drm_i915_gem_madvise *args = data;
4316         struct drm_gem_object *obj;
4317         struct drm_i915_gem_object *obj_priv;
4318         int ret;
4319
4320         switch (args->madv) {
4321         case I915_MADV_DONTNEED:
4322         case I915_MADV_WILLNEED:
4323                 break;
4324         default:
4325                 return -EINVAL;
4326         }
4327
4328         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4329         if (obj == NULL) {
4330                 DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
4331                           args->handle);
4332                 return -ENOENT;
4333         }
4334         obj_priv = to_intel_bo(obj);
4335
4336         ret = i915_mutex_lock_interruptible(dev);
4337         if (ret) {
4338                 drm_gem_object_unreference_unlocked(obj);
4339                 return ret;
4340         }
4341
4342         if (obj_priv->pin_count) {
4343                 drm_gem_object_unreference(obj);
4344                 mutex_unlock(&dev->struct_mutex);
4345
4346                 DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
4347                 return -EINVAL;
4348         }
4349
4350         if (obj_priv->madv != __I915_MADV_PURGED)
4351                 obj_priv->madv = args->madv;
4352
4353         /* if the object is no longer bound, discard its backing storage */
4354         if (i915_gem_object_is_purgeable(obj_priv) &&
4355             obj_priv->gtt_space == NULL)
4356                 i915_gem_object_truncate(obj);
4357
4358         args->retained = obj_priv->madv != __I915_MADV_PURGED;
4359
4360         drm_gem_object_unreference(obj);
4361         mutex_unlock(&dev->struct_mutex);
4362
4363         return 0;
4364 }
4365
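     /*
      * Allocate a new GEM object of the given size.  The object starts
      * life in the CPU read/write domain, with no fence register assigned
      * and its backing storage marked I915_MADV_WILLNEED.
      */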
4366 struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
4367                                               size_t size)
4368 {
4369         struct drm_i915_private *dev_priv = dev->dev_private;
4370         struct drm_i915_gem_object *obj;
4371
4372         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
4373         if (obj == NULL)
4374                 return NULL;
4375
4376         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4377                 kfree(obj);
4378                 return NULL;
4379         }
4380
4381         i915_gem_info_add_obj(dev_priv, size);
4382
4383         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4384         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4385
4386         obj->agp_type = AGP_USER_MEMORY;
4387         obj->base.driver_private = NULL;
4388         obj->fence_reg = I915_FENCE_REG_NONE;
4389         INIT_LIST_HEAD(&obj->list);
4390         INIT_LIST_HEAD(&obj->gpu_write_list);
4391         obj->madv = I915_MADV_WILLNEED;
4392
4393         return &obj->base;
4394 }
4395
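     /*
      * i915 allocates its objects via i915_gem_alloc_object() above, so
      * this generic GEM hook should never be reached (hence the BUG()).
      */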
4396 int i915_gem_init_object(struct drm_gem_object *obj)
4397 {
4398         BUG();
4399
4400         return 0;
4401 }
4402
4403 static void i915_gem_free_object_tail(struct drm_gem_object *obj)
4404 {
4405         struct drm_device *dev = obj->dev;
4406         drm_i915_private_t *dev_priv = dev->dev_private;
4407         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4408         int ret;
4409
4410         ret = i915_gem_object_unbind(obj);
4411         if (ret == -ERESTARTSYS) {
4412                 list_move(&obj_priv->list,
4413                           &dev_priv->mm.deferred_free_list);
4414                 return;
4415         }
4416
4417         if (obj_priv->mmap_offset)
4418                 i915_gem_free_mmap_offset(obj);
4419
4420         drm_gem_object_release(obj);
4421         i915_gem_info_remove_obj(dev_priv, obj->size);
4422
4423         kfree(obj_priv->page_cpu_valid);
4424         kfree(obj_priv->bit_17);
4425         kfree(obj_priv);
4426 }
4427
4428 void i915_gem_free_object(struct drm_gem_object *obj)
4429 {
4430         struct drm_device *dev = obj->dev;
4431         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4432
4433         trace_i915_gem_object_destroy(obj);
4434
4435         while (obj_priv->pin_count > 0)
4436                 i915_gem_object_unpin(obj);
4437
4438         if (obj_priv->phys_obj)
4439                 i915_gem_detach_phys_object(dev, obj);
4440
4441         i915_gem_free_object_tail(obj);
4442 }
4443
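     /*
      * Quiesce the GPU: wait for outstanding rendering, evict inactive
      * buffers under UMS, stop the hangcheck timer and tear down the
      * rings.  Called e.g. from i915_gem_leavevt_ioctl() and
      * i915_gem_lastclose() below.
      */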
4444 int
4445 i915_gem_idle(struct drm_device *dev)
4446 {
4447         drm_i915_private_t *dev_priv = dev->dev_private;
4448         int ret;
4449
4450         mutex_lock(&dev->struct_mutex);
4451
4452         if (dev_priv->mm.suspended ||
4453                         (dev_priv->render_ring.gem_object == NULL) ||
4454                         (HAS_BSD(dev) &&
4455                          dev_priv->bsd_ring.gem_object == NULL)) {
4456                 mutex_unlock(&dev->struct_mutex);
4457                 return 0;
4458         }
4459
4460         ret = i915_gpu_idle(dev);
4461         if (ret) {
4462                 mutex_unlock(&dev->struct_mutex);
4463                 return ret;
4464         }
4465
4466         /* Under UMS, be paranoid and evict. */
4467         if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
4468                 ret = i915_gem_evict_inactive(dev);
4469                 if (ret) {
4470                         mutex_unlock(&dev->struct_mutex);
4471                         return ret;
4472                 }
4473         }
4474
4475         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4476          * We need to replace this with a semaphore, or something.
4477          * And not confound mm.suspended!
4478          */
4479         dev_priv->mm.suspended = 1;
4480         del_timer_sync(&dev_priv->hangcheck_timer);
4481
4482         i915_kernel_lost_context(dev);
4483         i915_gem_cleanup_ringbuffer(dev);
4484
4485         mutex_unlock(&dev->struct_mutex);
4486
4487         /* Cancel the retire work handler, which should be idle now. */
4488         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4489
4490         return 0;
4491 }
4492
4493 /*
4494  * 965+ support PIPE_CONTROL commands, which provide finer grained control
4495  * over cache flushing.
4496  */
4497 static int
4498 i915_gem_init_pipe_control(struct drm_device *dev)
4499 {
4500         drm_i915_private_t *dev_priv = dev->dev_private;
4501         struct drm_gem_object *obj;
4502         struct drm_i915_gem_object *obj_priv;
4503         int ret;
4504
4505         obj = i915_gem_alloc_object(dev, 4096);
4506         if (obj == NULL) {
4507                 DRM_ERROR("Failed to allocate seqno page\n");
4508                 ret = -ENOMEM;
4509                 goto err;
4510         }
4511         obj_priv = to_intel_bo(obj);
4512         obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4513
4514         ret = i915_gem_object_pin(obj, 4096);
4515         if (ret)
4516                 goto err_unref;
4517
4518         dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4519         dev_priv->seqno_page = kmap(obj_priv->pages[0]);
4520         if (dev_priv->seqno_page == NULL) {
4521                 ret = -ENOMEM;
                     goto err_unpin;
             }
4522
4523         dev_priv->seqno_obj = obj;
4524         memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4525
4526         return 0;
4527
4528 err_unpin:
4529         i915_gem_object_unpin(obj);
4530 err_unref:
4531         drm_gem_object_unreference(obj);
4532 err:
4533         return ret;
4534 }
4535
4536
4537 static void
4538 i915_gem_cleanup_pipe_control(struct drm_device *dev)
4539 {
4540         drm_i915_private_t *dev_priv = dev->dev_private;
4541         struct drm_gem_object *obj;
4542         struct drm_i915_gem_object *obj_priv;
4543
4544         obj = dev_priv->seqno_obj;
4545         obj_priv = to_intel_bo(obj);
4546         kunmap(obj_priv->pages[0]);
4547         i915_gem_object_unpin(obj);
4548         drm_gem_object_unreference(obj);
4549         dev_priv->seqno_obj = NULL;
4550
4551         dev_priv->seqno_page = NULL;
4552 }
4553
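     /*
      * Bring up the command rings: the render ring always, the BSD ring
      * where present, plus the PIPE_CONTROL seqno page on chipsets that
      * support it.
      */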
4554 int
4555 i915_gem_init_ringbuffer(struct drm_device *dev)
4556 {
4557         drm_i915_private_t *dev_priv = dev->dev_private;
4558         int ret;
4559
4560         if (HAS_PIPE_CONTROL(dev)) {
4561                 ret = i915_gem_init_pipe_control(dev);
4562                 if (ret)
4563                         return ret;
4564         }
4565
4566         ret = intel_init_render_ring_buffer(dev);
4567         if (ret)
4568                 goto cleanup_pipe_control;
4569
4570         if (HAS_BSD(dev)) {
4571                 ret = intel_init_bsd_ring_buffer(dev);
4572                 if (ret)
4573                         goto cleanup_render_ring;
4574         }
4575
4576         dev_priv->next_seqno = 1;
4577
4578         return 0;
4579
4580 cleanup_render_ring:
4581         intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
4582 cleanup_pipe_control:
4583         if (HAS_PIPE_CONTROL(dev))
4584                 i915_gem_cleanup_pipe_control(dev);
4585         return ret;
4586 }
4587
4588 void
4589 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4590 {
4591         drm_i915_private_t *dev_priv = dev->dev_private;
4592
4593         intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
4594         if (HAS_BSD(dev))
4595                 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
4596         if (HAS_PIPE_CONTROL(dev))
4597                 i915_gem_cleanup_pipe_control(dev);
4598 }
4599
4600 int
4601 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4602                        struct drm_file *file_priv)
4603 {
4604         drm_i915_private_t *dev_priv = dev->dev_private;
4605         int ret;
4606
4607         if (drm_core_check_feature(dev, DRIVER_MODESET))
4608                 return 0;
4609
4610         if (atomic_read(&dev_priv->mm.wedged)) {
4611                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4612                 atomic_set(&dev_priv->mm.wedged, 0);
4613         }
4614
4615         mutex_lock(&dev->struct_mutex);
4616         dev_priv->mm.suspended = 0;
4617
4618         ret = i915_gem_init_ringbuffer(dev);
4619         if (ret != 0) {
4620                 mutex_unlock(&dev->struct_mutex);
4621                 return ret;
4622         }
4623
4624         BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
4625         BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
4626         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4627         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4628         BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
4629         BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
4630         mutex_unlock(&dev->struct_mutex);
4631
4632         ret = drm_irq_install(dev);
4633         if (ret)
4634                 goto cleanup_ringbuffer;
4635
4636         return 0;
4637
4638 cleanup_ringbuffer:
4639         mutex_lock(&dev->struct_mutex);
4640         i915_gem_cleanup_ringbuffer(dev);
4641         dev_priv->mm.suspended = 1;
4642         mutex_unlock(&dev->struct_mutex);
4643
4644         return ret;
4645 }
4646
4647 int
4648 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4649                        struct drm_file *file_priv)
4650 {
4651         if (drm_core_check_feature(dev, DRIVER_MODESET))
4652                 return 0;
4653
4654         drm_irq_uninstall(dev);
4655         return i915_gem_idle(dev);
4656 }
4657
4658 void
4659 i915_gem_lastclose(struct drm_device *dev)
4660 {
4661         int ret;
4662
4663         if (drm_core_check_feature(dev, DRIVER_MODESET))
4664                 return;
4665
4666         ret = i915_gem_idle(dev);
4667         if (ret)
4668                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4669 }
4670
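     /*
      * One-time GEM setup at driver load: initialise the memory-manager
      * lists and the retire work handler, hook this device into the
      * global shrinker list, work around the GEN3 ARB C3 issue, and clear
      * the fence registers.
      */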
4671 void
4672 i915_gem_load(struct drm_device *dev)
4673 {
4674         int i;
4675         drm_i915_private_t *dev_priv = dev->dev_private;
4676
4677         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4678         INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
4679         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4680         INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
4681         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4682         INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
4683         INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
4684         INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
4685         if (HAS_BSD(dev)) {
4686                 INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
4687                 INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
4688         }
4689         for (i = 0; i < 16; i++)
4690                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4691         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4692                           i915_gem_retire_work_handler);
4693         init_completion(&dev_priv->error_completion);
4694         spin_lock(&shrink_list_lock);
4695         list_add(&dev_priv->mm.shrink_list, &shrink_list);
4696         spin_unlock(&shrink_list_lock);
4697
4698         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4699         if (IS_GEN3(dev)) {
4700                 u32 tmp = I915_READ(MI_ARB_STATE);
4701                 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
4702                         /* arb state is a masked write, so set bit + bit in mask */
4703                         tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
4704                         I915_WRITE(MI_ARB_STATE, tmp);
4705                 }
4706         }
4707
4708         /* Old X drivers will take 0-2 for front, back, depth buffers */
4709         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4710                 dev_priv->fence_reg_start = 3;
4711
4712         if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4713                 dev_priv->num_fence_regs = 16;
4714         else
4715                 dev_priv->num_fence_regs = 8;
4716
4717         /* Initialize fence registers to zero */
4718         switch (INTEL_INFO(dev)->gen) {
4719         case 6:
4720                 for (i = 0; i < 16; i++)
4721                         I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
4722                 break;
4723         case 5:
4724         case 4:
4725                 for (i = 0; i < 16; i++)
4726                         I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
4727                 break;
4728         case 3:
4729                 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4730                         for (i = 0; i < 8; i++)
4731                                 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
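                     /* fall through: gen3 also uses the 8 gen2-style fence registers */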
4732         case 2:
4733                 for (i = 0; i < 8; i++)
4734                         I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4735                 break;
4736         }
4737         i915_gem_detect_bit_6_swizzle(dev);
4738         init_waitqueue_head(&dev_priv->pending_flip_queue);
4739 }
4740
4741 /*
4742  * Create a physically contiguous memory object for this object
4743  * e.g. for cursor + overlay regs
4744  */
4745 static int i915_gem_init_phys_object(struct drm_device *dev,
4746                                      int id, int size, int align)
4747 {
4748         drm_i915_private_t *dev_priv = dev->dev_private;
4749         struct drm_i915_gem_phys_object *phys_obj;
4750         int ret;
4751
4752         if (dev_priv->mm.phys_objs[id - 1] || !size)
4753                 return 0;
4754
4755         phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4756         if (!phys_obj)
4757                 return -ENOMEM;
4758
4759         phys_obj->id = id;
4760
4761         phys_obj->handle = drm_pci_alloc(dev, size, align);
4762         if (!phys_obj->handle) {
4763                 ret = -ENOMEM;
4764                 goto kfree_obj;
4765         }
4766 #ifdef CONFIG_X86
4767         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4768 #endif
4769
4770         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4771
4772         return 0;
4773 kfree_obj:
4774         kfree(phys_obj);
4775         return ret;
4776 }
4777
4778 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4779 {
4780         drm_i915_private_t *dev_priv = dev->dev_private;
4781         struct drm_i915_gem_phys_object *phys_obj;
4782
4783         if (!dev_priv->mm.phys_objs[id - 1])
4784                 return;
4785
4786         phys_obj = dev_priv->mm.phys_objs[id - 1];
4787         if (phys_obj->cur_obj) {
4788                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4789         }
4790
4791 #ifdef CONFIG_X86
4792         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4793 #endif
4794         drm_pci_free(dev, phys_obj->handle);
4795         kfree(phys_obj);
4796         dev_priv->mm.phys_objs[id - 1] = NULL;
4797 }
4798
4799 void i915_gem_free_all_phys_object(struct drm_device *dev)
4800 {
4801         int i;
4802
4803         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4804                 i915_gem_free_phys_object(dev, i);
4805 }
4806
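     /*
      * Detach an object from its physically contiguous backing store,
      * copying the contents back into the object's page list and flushing
      * caches so the CPU copy is coherent.
      */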
4807 void i915_gem_detach_phys_object(struct drm_device *dev,
4808                                  struct drm_gem_object *obj)
4809 {
4810         struct drm_i915_gem_object *obj_priv;
4811         int i;
4812         int ret;
4813         int page_count;
4814
4815         obj_priv = to_intel_bo(obj);
4816         if (!obj_priv->phys_obj)
4817                 return;
4818
4819         ret = i915_gem_object_get_pages(obj, 0);
4820         if (ret)
4821                 goto out;
4822
4823         page_count = obj->size / PAGE_SIZE;
4824
4825         for (i = 0; i < page_count; i++) {
4826                 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
4827                 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4828
4829                 memcpy(dst, src, PAGE_SIZE);
4830                 kunmap_atomic(dst, KM_USER0);
4831         }
4832         drm_clflush_pages(obj_priv->pages, page_count);
4833         drm_agp_chipset_flush(dev);
4834
4835         i915_gem_object_put_pages(obj);
4836 out:
4837         obj_priv->phys_obj->cur_obj = NULL;
4838         obj_priv->phys_obj = NULL;
4839 }
4840
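     /*
      * Attach an object to one of the preallocated physically contiguous
      * buffers (creating it on first use) and copy the object's current
      * pages into it.  Used for hardware that requires physical
      * addresses, e.g. cursor and overlay registers.
      */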
4841 int
4842 i915_gem_attach_phys_object(struct drm_device *dev,
4843                             struct drm_gem_object *obj,
4844                             int id,
4845                             int align)
4846 {
4847         drm_i915_private_t *dev_priv = dev->dev_private;
4848         struct drm_i915_gem_object *obj_priv;
4849         int ret = 0;
4850         int page_count;
4851         int i;
4852
4853         if (id > I915_MAX_PHYS_OBJECT)
4854                 return -EINVAL;
4855
4856         obj_priv = to_intel_bo(obj);
4857
4858         if (obj_priv->phys_obj) {
4859                 if (obj_priv->phys_obj->id == id)
4860                         return 0;
4861                 i915_gem_detach_phys_object(dev, obj);
4862         }
4863
4864         /* create a new object */
4865         if (!dev_priv->mm.phys_objs[id - 1]) {
4866                 ret = i915_gem_init_phys_object(dev, id,
4867                                                 obj->size, align);
4868                 if (ret) {
4869                         DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
4870                         goto out;
4871                 }
4872         }
4873
4874         /* bind to the object */
4875         obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4876         obj_priv->phys_obj->cur_obj = obj;
4877
4878         ret = i915_gem_object_get_pages(obj, 0);
4879         if (ret) {
4880                 DRM_ERROR("failed to get page list\n");
4881                 goto out;
4882         }
4883
4884         page_count = obj->size / PAGE_SIZE;
4885
4886         for (i = 0; i < page_count; i++) {
4887                 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
4888                 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4889
4890                 memcpy(dst, src, PAGE_SIZE);
4891                 kunmap_atomic(src, KM_USER0);
4892         }
4893
4894         i915_gem_object_put_pages(obj);
4895
4896         return 0;
4897 out:
4898         return ret;
4899 }
4900
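     /*
      * pwrite fast path for objects backed by a phys object: copy the
      * user data straight into the contiguous buffer and flush the
      * chipset write buffers.
      */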
4901 static int
4902 i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4903                      struct drm_i915_gem_pwrite *args,
4904                      struct drm_file *file_priv)
4905 {
4906         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4907         void *obj_addr;
4908         int ret;
4909         char __user *user_data;
4910
4911         user_data = (char __user *) (uintptr_t) args->data_ptr;
4912         obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4913
4914         DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
4915         ret = copy_from_user(obj_addr, user_data, args->size);
4916         if (ret)
4917                 return -EFAULT;
4918
4919         drm_agp_chipset_flush(dev);
4920         return 0;
4921 }
4922
4923 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4924 {
4925         struct drm_i915_file_private *file_priv = file->driver_priv;
4926
4927         /* Clean up our request list when the client is going away, so that
4928          * later retire_requests won't dereference our soon-to-be-gone
4929          * file_priv.
4930          */
4931         spin_lock(&file_priv->mm.lock);
4932         while (!list_empty(&file_priv->mm.request_list)) {
4933                 struct drm_i915_gem_request *request;
4934
4935                 request = list_first_entry(&file_priv->mm.request_list,
4936                                            struct drm_i915_gem_request,
4937                                            client_list);
4938                 list_del(&request->client_list);
4939                 request->file_priv = NULL;
4940         }
4941         spin_unlock(&file_priv->mm.lock);
4942 }
4943
4944 static int
4945 i915_gpu_is_active(struct drm_device *dev)
4946 {
4947         drm_i915_private_t *dev_priv = dev->dev_private;
4948         int lists_empty;
4949
4950         lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
4951                       list_empty(&dev_priv->render_ring.active_list);
4952         if (HAS_BSD(dev))
4953                 lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
4954
4955         return !lists_empty;
4956 }
4957
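     /*
      * Shrinker callback.  With nr_to_scan == 0 only report how many
      * inactive objects could be reclaimed.  Otherwise first unbind
      * clean, purgeable buffers, then anything else left on the inactive
      * lists, and as a last resort idle the GPU and rescan.  Returns -1
      * when every device's struct_mutex was contended (unbinding might
      * deadlock), otherwise a count scaled by sysctl_vfs_cache_pressure.
      */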
4958 static int
4959 i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
4960 {
4961         drm_i915_private_t *dev_priv, *next_dev;
4962         struct drm_i915_gem_object *obj_priv, *next_obj;
4963         int cnt = 0;
4964         int would_deadlock = 1;
4965
4966         /* "fast-path" to count number of available objects */
4967         if (nr_to_scan == 0) {
4968                 spin_lock(&shrink_list_lock);
4969                 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
4970                         struct drm_device *dev = dev_priv->dev;
4971
4972                         if (mutex_trylock(&dev->struct_mutex)) {
4973                                 list_for_each_entry(obj_priv,
4974                                                     &dev_priv->mm.inactive_list,
4975                                                     list)
4976                                         cnt++;
4977                                 mutex_unlock(&dev->struct_mutex);
4978                         }
4979                 }
4980                 spin_unlock(&shrink_list_lock);
4981
4982                 return (cnt / 100) * sysctl_vfs_cache_pressure;
4983         }
4984
4985         spin_lock(&shrink_list_lock);
4986
4987 rescan:
4988         /* first scan for clean buffers */
4989         list_for_each_entry_safe(dev_priv, next_dev,
4990                                  &shrink_list, mm.shrink_list) {
4991                 struct drm_device *dev = dev_priv->dev;
4992
4993                 if (!mutex_trylock(&dev->struct_mutex))
4994                         continue;
4995
4996                 spin_unlock(&shrink_list_lock);
4997                 i915_gem_retire_requests(dev);
4998
4999                 list_for_each_entry_safe(obj_priv, next_obj,
5000                                          &dev_priv->mm.inactive_list,
5001                                          list) {
5002                         if (i915_gem_object_is_purgeable(obj_priv)) {
5003                                 i915_gem_object_unbind(&obj_priv->base);
5004                                 if (--nr_to_scan <= 0)
5005                                         break;
5006                         }
5007                 }
5008
5009                 spin_lock(&shrink_list_lock);
5010                 mutex_unlock(&dev->struct_mutex);
5011
5012                 would_deadlock = 0;
5013
5014                 if (nr_to_scan <= 0)
5015                         break;
5016         }
5017
5018         /* second pass, evict/count anything still on the inactive list */
5019         list_for_each_entry_safe(dev_priv, next_dev,
5020                                  &shrink_list, mm.shrink_list) {
5021                 struct drm_device *dev = dev_priv->dev;
5022
5023                 if (!mutex_trylock(&dev->struct_mutex))
5024                         continue;
5025
5026                 spin_unlock(&shrink_list_lock);
5027
5028                 list_for_each_entry_safe(obj_priv, next_obj,
5029                                          &dev_priv->mm.inactive_list,
5030                                          list) {
5031                         if (nr_to_scan > 0) {
5032                                 i915_gem_object_unbind(&obj_priv->base);
5033                                 nr_to_scan--;
5034                         } else
5035                                 cnt++;
5036                 }
5037
5038                 spin_lock(&shrink_list_lock);
5039                 mutex_unlock(&dev->struct_mutex);
5040
5041                 would_deadlock = 0;
5042         }
5043
5044         if (nr_to_scan) {
5045                 int active = 0;
5046
5047                 /*
5048                  * We are desperate for pages, so as a last resort, wait
5049                  * for the GPU to finish and discard whatever we can.
5050                  * This dramatically reduces the number of
5051                  * OOM-killer events whilst running the GPU aggressively.
5052                  */
5053                 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
5054                         struct drm_device *dev = dev_priv->dev;
5055
5056                         if (!mutex_trylock(&dev->struct_mutex))
5057                                 continue;
5058
5059                         spin_unlock(&shrink_list_lock);
5060
5061                         if (i915_gpu_is_active(dev)) {
5062                                 i915_gpu_idle(dev);
5063                                 active++;
5064                         }
5065
5066                         spin_lock(&shrink_list_lock);
5067                         mutex_unlock(&dev->struct_mutex);
5068                 }
5069
5070                 if (active)
5071                         goto rescan;
5072         }
5073
5074         spin_unlock(&shrink_list_lock);
5075
5076         if (would_deadlock)
5077                 return -1;
5078         else if (cnt > 0)
5079                 return (cnt / 100) * sysctl_vfs_cache_pressure;
5080         else
5081                 return 0;
5082 }
5083
5084 static struct shrinker shrinker = {
5085         .shrink = i915_gem_shrink,
5086         .seeks = DEFAULT_SEEKS,
5087 };
5088
5089 __init void
5090 i915_gem_shrinker_init(void)
5091 {
5092         register_shrinker(&shrinker);
5093 }
5094
5095 __exit void
5096 i915_gem_shrinker_exit(void)
5097 {
5098         unregister_shrinker(&shrinker);
5099 }