/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

struct change_domains {
        uint32_t invalidate_domains;
        uint32_t flush_domains;
        uint32_t flush_rings;
        uint32_t flips;
};

/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *      1. Allocated
 *      2. Written by CPU
 *      3. Mapped to GTT
 *      4. Read by GPU
 *      5. Unmapped from GTT
 *      6. Freed
 *
 *      Let's take these a step at a time
 *
 *      1. Allocated
 *              Pages allocated from the kernel may still have
 *              cache contents, so we set them to (CPU, CPU) always.
 *      2. Written by CPU (using pwrite)
 *              The pwrite function calls set_domain (CPU, CPU) and
 *              this function does nothing (as nothing changes)
 *      3. Mapped by GTT
 *              This function asserts that the object is not
 *              currently in any GPU-based read or write domains
 *      4. Read by GPU
 *              i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *              As write_domain is zero, this function adds in the
 *              current read domains (CPU+COMMAND, 0).
 *              flush_domains is set to CPU.
 *              invalidate_domains is set to COMMAND
 *              clflush is run to get data out of the CPU caches
 *              then i915_dev_set_domain calls i915_gem_flush to
 *              emit an MI_FLUSH and drm_agp_chipset_flush
 *      5. Unmapped from GTT
 *              i915_gem_object_unbind calls set_domain (CPU, CPU)
 *              flush_domains and invalidate_domains end up both zero
 *              so no flushing/invalidating happens
 *      6. Freed
 *              yay, done
 *
 * Case 2: The shared render buffer
 *
 *      1. Allocated
 *      2. Mapped to GTT
 *      3. Read/written by GPU
 *      4. set_domain to (CPU,CPU)
 *      5. Read/written by CPU
 *      6. Read/written by GPU
 *
 *      1. Allocated
 *              Same as last example, (CPU, CPU)
 *      2. Mapped to GTT
 *              Nothing changes (assertions find that it is not in the GPU)
 *      3. Read/written by GPU
 *              execbuffer calls set_domain (RENDER, RENDER)
 *              flush_domains gets CPU
 *              invalidate_domains gets GPU
 *              clflush (obj)
 *              MI_FLUSH and drm_agp_chipset_flush
 *      4. set_domain (CPU, CPU)
 *              flush_domains gets GPU
 *              invalidate_domains gets CPU
 *              wait_rendering (obj) to make sure all drawing is complete.
 *              This will include an MI_FLUSH to get the data from GPU
 *              to memory
 *              clflush (obj) to invalidate the CPU cache
 *              Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *      5. Read/written by CPU
 *              cache lines are loaded and dirtied
 *      6. Read/written by GPU
 *              Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *      1. Allocated
 *      2. Written by CPU
 *      3. Read by GPU
 *      4. Updated (written) by CPU again
 *      5. Read by GPU
 *
 *      1. Allocated
 *              (CPU, CPU)
 *      2. Written by CPU
 *              (CPU, CPU)
 *      3. Read by GPU
 *              (CPU+RENDER, 0)
 *              flush_domains = CPU
 *              invalidate_domains = RENDER
 *              clflush (obj)
 *              MI_FLUSH
 *              drm_agp_chipset_flush
 *      4. Updated (written) by CPU again
 *              (CPU, CPU)
 *              flush_domains = 0 (no previous write domain)
 *              invalidate_domains = 0 (no new read domains)
 *      5. Read by GPU
 *              (CPU+RENDER, 0)
 *              flush_domains = CPU
 *              invalidate_domains = RENDER
 *              clflush (obj)
 *              MI_FLUSH
 *              drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
                                  struct intel_ring_buffer *ring,
                                  struct change_domains *cd)
{
        uint32_t invalidate_domains = 0, flush_domains = 0;

        /*
         * If the object isn't moving to a new write domain,
         * let the object stay in multiple read domains
         */
        if (obj->base.pending_write_domain == 0)
                obj->base.pending_read_domains |= obj->base.read_domains;

        /*
         * Flush the current write domain if
         * the new read domains don't match. Invalidate
         * any read domains which differ from the old
         * write domain
         */
        if (obj->base.write_domain &&
            (((obj->base.write_domain != obj->base.pending_read_domains ||
               obj->ring != ring)) ||
             (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
                flush_domains |= obj->base.write_domain;
                invalidate_domains |=
                        obj->base.pending_read_domains & ~obj->base.write_domain;
        }
        /*
         * Invalidate any read caches which may have
         * stale data. That is, any new read domains.
         */
        invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
        if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
                i915_gem_clflush_object(obj);

        if (obj->base.pending_write_domain)
                cd->flips |= atomic_read(&obj->pending_flip);

        /* The actual obj->write_domain will be updated with
         * pending_write_domain after we emit the accumulated flush for all
         * of our domain changes in execbuffers (which clears objects'
         * write_domains).  So if we have a current write domain that we
         * aren't changing, set pending_write_domain to that.
         */
        if (flush_domains == 0 && obj->base.pending_write_domain == 0)
                obj->base.pending_write_domain = obj->base.write_domain;

        cd->invalidate_domains |= invalidate_domains;
        cd->flush_domains |= flush_domains;
        if (flush_domains & I915_GEM_GPU_DOMAINS)
                cd->flush_rings |= intel_ring_flag(obj->ring);
        if (invalidate_domains & I915_GEM_GPU_DOMAINS)
                cd->flush_rings |= intel_ring_flag(ring);
}

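/*
 * Per-execbuffer lookup table mapping userspace handles back to their
 * objects. Handles are hashed into a power-of-two array of hlist buckets;
 * "and" holds the bucket mask (count - 1), so e.g. a handle of 0x101 with
 * a mask of 0xff lands in bucket 0x01.
 */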
struct eb_objects {
        int and;
        struct hlist_head buckets[0];
};

static struct eb_objects *
eb_create(int size)
{
        struct eb_objects *eb;
        int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
        while (count > size)
                count >>= 1;
        eb = kzalloc(count*sizeof(struct hlist_head) +
                     sizeof(struct eb_objects),
                     GFP_KERNEL);
        if (eb == NULL)
                return eb;

        eb->and = count - 1;
        return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
        memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{
        hlist_add_head(&obj->exec_node,
                       &eb->buckets[obj->exec_handle & eb->and]);
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct drm_i915_gem_object *obj;

        head = &eb->buckets[handle & eb->and];
        hlist_for_each(node, head) {
                obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
                if (obj->exec_handle == handle)
                        return obj;
        }

        return NULL;
}

static void
eb_destroy(struct eb_objects *eb)
{
        kfree(eb);
}

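/*
 * Relocations can be written directly through the CPU whenever the object
 * is already CPU coherent (its write domain is the CPU, or it is cached);
 * otherwise they must be written through an uncached GTT mapping.
 */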
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
        return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
                obj->cache_level != I915_CACHE_NONE);
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                                   struct eb_objects *eb,
                                   struct drm_i915_gem_relocation_entry *reloc)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_gem_object *target_obj;
        struct drm_i915_gem_object *target_i915_obj;
        uint32_t target_offset;
        int ret = -EINVAL;

        /* we already hold a reference to all valid objects */
        target_obj = &eb_get_object(eb, reloc->target_handle)->base;
        if (unlikely(target_obj == NULL))
                return -ENOENT;

        target_i915_obj = to_intel_bo(target_obj);
        target_offset = target_i915_obj->gtt_offset;

        /* The target buffer should have appeared before us in the
         * exec_object list, so it should have a GTT space bound by now.
         */
        if (unlikely(target_offset == 0)) {
                DRM_DEBUG("No GTT space found for object %d\n",
                          reloc->target_handle);
                return ret;
        }

        /* Validate that the target is in a valid r/w GPU domain */
        if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
                DRM_DEBUG("reloc with multiple write domains: "
                          "obj %p target %d offset %d "
                          "read %08x write %08x",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          reloc->read_domains,
                          reloc->write_domain);
                return ret;
        }
        if (unlikely((reloc->write_domain | reloc->read_domains)
                     & ~I915_GEM_GPU_DOMAINS)) {
                DRM_DEBUG("reloc with read/write non-GPU domains: "
                          "obj %p target %d offset %d "
                          "read %08x write %08x",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          reloc->read_domains,
                          reloc->write_domain);
                return ret;
        }
        if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
                     reloc->write_domain != target_obj->pending_write_domain)) {
                DRM_DEBUG("Write domain conflict: "
                          "obj %p target %d offset %d "
                          "new %08x old %08x\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          reloc->write_domain,
                          target_obj->pending_write_domain);
                return ret;
        }

        target_obj->pending_read_domains |= reloc->read_domains;
        target_obj->pending_write_domain |= reloc->write_domain;

        /* If the relocation already has the right value in it, no
         * more work needs to be done.
         */
        if (target_offset == reloc->presumed_offset)
                return 0;

        /* Check that the relocation address is valid... */
        if (unlikely(reloc->offset > obj->base.size - 4)) {
                DRM_DEBUG("Relocation beyond object bounds: "
                          "obj %p target %d offset %d size %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          (int) obj->base.size);
                return ret;
        }
        if (unlikely(reloc->offset & 3)) {
                DRM_DEBUG("Relocation not 4-byte aligned: "
                          "obj %p target %d offset %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset);
                return ret;
        }

        /* We can't wait for rendering with pagefaults disabled */
        if (obj->active && in_atomic())
                return -EFAULT;

        reloc->delta += target_offset;
        if (use_cpu_reloc(obj)) {
                uint32_t page_offset = reloc->offset & ~PAGE_MASK;
                char *vaddr;

                ret = i915_gem_object_set_to_cpu_domain(obj, 1);
                if (ret)
                        return ret;

                vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
                *(uint32_t *)(vaddr + page_offset) = reloc->delta;
                kunmap_atomic(vaddr);
        } else {
                struct drm_i915_private *dev_priv = dev->dev_private;
                uint32_t __iomem *reloc_entry;
                void __iomem *reloc_page;

                ret = i915_gem_object_set_to_gtt_domain(obj, true);
                if (ret)
                        return ret;

                ret = i915_gem_object_put_fence(obj);
                if (ret)
                        return ret;

                /* Map the page containing the relocation we're going to perform.  */
                reloc->offset += obj->gtt_offset;
                reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
                                                      reloc->offset & PAGE_MASK);
                reloc_entry = (uint32_t __iomem *)
                        (reloc_page + (reloc->offset & ~PAGE_MASK));
                iowrite32(reloc->delta, reloc_entry);
                io_mapping_unmap_atomic(reloc_page);
        }

        /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
         * pipe_control writes because the gpu doesn't properly redirect them
         * through the ppgtt for non_secure batchbuffers. */
        if (unlikely(IS_GEN6(dev) &&
            reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
            !target_i915_obj->has_global_gtt_mapping)) {
                i915_gem_gtt_bind_object(target_i915_obj,
                                         target_i915_obj->cache_level);
        }

        /* and update the user's relocation entry */
        reloc->presumed_offset = target_offset;

        return 0;
}

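/*
 * Fast-path relocation processing: relocations are copied from userspace
 * in batches onto a small stack buffer (the caller has disabled pagefaults,
 * hence the _inatomic copies) and applied one by one, writing any updated
 * presumed offsets back to userspace as we go.
 */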
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
                                    struct eb_objects *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
        struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
        struct drm_i915_gem_relocation_entry __user *user_relocs;
        struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
        int remain, ret;

        user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;

        remain = entry->relocation_count;
        while (remain) {
                struct drm_i915_gem_relocation_entry *r = stack_reloc;
                int count = remain;
                if (count > ARRAY_SIZE(stack_reloc))
                        count = ARRAY_SIZE(stack_reloc);
                remain -= count;

                if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
                        return -EFAULT;

                do {
                        u64 offset = r->presumed_offset;

                        ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
                        if (ret)
                                return ret;

                        if (r->presumed_offset != offset &&
                            __copy_to_user_inatomic(&user_relocs->presumed_offset,
                                                    &r->presumed_offset,
                                                    sizeof(r->presumed_offset))) {
                                return -EFAULT;
                        }

                        user_relocs++;
                        r++;
                } while (--count);
        }

        return 0;
#undef N_RELOC
}

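/*
 * Slow-path variant: the relocations have already been copied into a
 * kernel buffer by the caller, so they can be applied directly without
 * touching userspace memory.
 */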
static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
                                         struct eb_objects *eb,
                                         struct drm_i915_gem_relocation_entry *relocs)
{
        const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
        int i, ret;

        for (i = 0; i < entry->relocation_count; i++) {
                ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
                             struct eb_objects *eb,
                             struct list_head *objects)
{
        struct drm_i915_gem_object *obj;
        int ret = 0;

        /* This is the fast path and we cannot handle a pagefault whilst
         * holding the struct mutex lest the user pass in the relocations
         * contained within a mmaped bo. For in such a case, the page
         * fault handler would call i915_gem_fault() and we would try to
         * acquire the struct mutex again. Obviously this is bad and so
         * lockdep complains vehemently.
         */
        pagefault_disable();
        list_for_each_entry(obj, objects, exec_list) {
                ret = i915_gem_execbuffer_relocate_object(obj, eb);
                if (ret)
                        break;
        }
        pagefault_enable();

        return ret;
}

#define  __EXEC_OBJECT_HAS_FENCE (1<<31)

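/*
 * GTT relocations are written through the mappable aperture, so an object
 * only needs to be bound within the mappable range when it carries
 * relocations that cannot be handled via the CPU path.
 */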
static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
{
        struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
        return entry->relocation_count && !use_cpu_reloc(obj);
}

static int
pin_and_fence_object(struct drm_i915_gem_object *obj,
                     struct intel_ring_buffer *ring)
{
        struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        bool need_fence, need_mappable;
        int ret;

        need_fence =
                has_fenced_gpu_access &&
                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                obj->tiling_mode != I915_TILING_NONE;
        need_mappable = need_fence || need_reloc_mappable(obj);

        ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
        if (ret)
                return ret;

        if (has_fenced_gpu_access) {
                if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
                        ret = i915_gem_object_get_fence(obj);
                        if (ret)
                                goto err_unpin;

                        if (i915_gem_object_pin_fence(obj))
                                entry->flags |= __EXEC_OBJECT_HAS_FENCE;

                        obj->pending_fenced_gpu_access = true;
                }
        }

        entry->offset = obj->gtt_offset;
        return 0;

err_unpin:
        i915_gem_object_unpin(obj);
        return ret;
}

static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                            struct drm_file *file,
                            struct list_head *objects)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_object *obj;
        int ret, retry;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        struct list_head ordered_objects;

        INIT_LIST_HEAD(&ordered_objects);
        while (!list_empty(objects)) {
                struct drm_i915_gem_exec_object2 *entry;
                bool need_fence, need_mappable;

                obj = list_first_entry(objects,
                                       struct drm_i915_gem_object,
                                       exec_list);
                entry = obj->exec_entry;

                need_fence =
                        has_fenced_gpu_access &&
                        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                        obj->tiling_mode != I915_TILING_NONE;
                need_mappable = need_fence || need_reloc_mappable(obj);

                if (need_mappable)
                        list_move(&obj->exec_list, &ordered_objects);
                else
                        list_move_tail(&obj->exec_list, &ordered_objects);

                obj->base.pending_read_domains = 0;
                obj->base.pending_write_domain = 0;
        }
        list_splice(&ordered_objects, objects);

        /* Attempt to pin all of the buffers into the GTT.
         * This is done in 3 phases:
         *
         * 1a. Unbind all objects that do not match the GTT constraints for
         *     the execbuffer (fenceable, mappable, alignment etc).
         * 1b. Increment pin count for already bound objects.
         * 2.  Bind new objects.
         * 3.  Decrement pin count.
         *
         * This avoids unnecessary unbinding of later objects in order to make
         * room for the earlier objects *unless* we need to defragment.
         */
        retry = 0;
        do {
                ret = 0;

                /* Unbind any ill-fitting objects or pin. */
                list_for_each_entry(obj, objects, exec_list) {
                        struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
                        bool need_fence, need_mappable;

                        if (!obj->gtt_space)
                                continue;

                        need_fence =
                                has_fenced_gpu_access &&
                                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                                obj->tiling_mode != I915_TILING_NONE;
                        need_mappable = need_fence || need_reloc_mappable(obj);

                        if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
                            (need_mappable && !obj->map_and_fenceable))
                                ret = i915_gem_object_unbind(obj);
                        else
                                ret = pin_and_fence_object(obj, ring);
                        if (ret)
                                goto err;
                }

                /* Bind fresh objects */
                list_for_each_entry(obj, objects, exec_list) {
                        if (obj->gtt_space)
                                continue;

                        ret = pin_and_fence_object(obj, ring);
                        if (ret) {
                                int ret_ignore;

                                /* This can potentially raise a harmless
                                 * -EINVAL if we failed to bind in the above
                                 * call. It cannot raise -EINTR since we know
                                 * that the bo is freshly bound and so will
                                 * not need to be flushed or waited upon.
                                 */
                                ret_ignore = i915_gem_object_unbind(obj);
                                (void)ret_ignore;
                                WARN_ON(obj->gtt_space);
                                break;
                        }
                }

                /* Decrement pin count for bound objects */
                list_for_each_entry(obj, objects, exec_list) {
                        struct drm_i915_gem_exec_object2 *entry;

                        if (!obj->gtt_space)
                                continue;

                        entry = obj->exec_entry;
                        if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
                                i915_gem_object_unpin_fence(obj);
                                entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
                        }

                        i915_gem_object_unpin(obj);

                        /* ... and ensure ppgtt mapping exists if needed. */
                        if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
                                i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
                                                       obj, obj->cache_level);

                                obj->has_aliasing_ppgtt_mapping = 1;
                        }
                }

                if (ret != -ENOSPC || retry > 1)
                        return ret;

                /* First attempt, just clear anything that is purgeable.
                 * Second attempt, clear the entire GTT.
                 */
                ret = i915_gem_evict_everything(ring->dev, retry == 0);
                if (ret)
                        return ret;

                retry++;
        } while (1);

err:
        list_for_each_entry_continue_reverse(obj, objects, exec_list) {
                struct drm_i915_gem_exec_object2 *entry;

                if (!obj->gtt_space)
                        continue;

                entry = obj->exec_entry;
                if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
                        i915_gem_object_unpin_fence(obj);
                        entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
                }

                i915_gem_object_unpin(obj);
        }

        return ret;
}

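/*
 * Slow path taken when the fast relocation pass faults: drop the objects
 * and struct_mutex, copy all relocations from userspace into a kernel
 * buffer, then retake the lock, re-look-up the objects and apply the
 * relocations from that copy.
 */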
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                                  struct drm_file *file,
                                  struct intel_ring_buffer *ring,
                                  struct list_head *objects,
                                  struct eb_objects *eb,
                                  struct drm_i915_gem_exec_object2 *exec,
                                  int count)
{
        struct drm_i915_gem_relocation_entry *reloc;
        struct drm_i915_gem_object *obj;
        int *reloc_offset;
        int i, total, ret;

        /* We may process another execbuffer during the unlock... */
        while (!list_empty(objects)) {
                obj = list_first_entry(objects,
                                       struct drm_i915_gem_object,
                                       exec_list);
                list_del_init(&obj->exec_list);
                drm_gem_object_unreference(&obj->base);
        }

        mutex_unlock(&dev->struct_mutex);

        total = 0;
        for (i = 0; i < count; i++)
                total += exec[i].relocation_count;

        reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
        reloc = drm_malloc_ab(total, sizeof(*reloc));
        if (reloc == NULL || reloc_offset == NULL) {
                drm_free_large(reloc);
                drm_free_large(reloc_offset);
                mutex_lock(&dev->struct_mutex);
                return -ENOMEM;
        }

        total = 0;
        for (i = 0; i < count; i++) {
                struct drm_i915_gem_relocation_entry __user *user_relocs;

                user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;

                if (copy_from_user(reloc+total, user_relocs,
                                   exec[i].relocation_count * sizeof(*reloc))) {
                        ret = -EFAULT;
                        mutex_lock(&dev->struct_mutex);
                        goto err;
                }

                reloc_offset[i] = total;
                total += exec[i].relocation_count;
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret) {
                mutex_lock(&dev->struct_mutex);
                goto err;
        }

        /* reacquire the objects */
        eb_reset(eb);
        for (i = 0; i < count; i++) {
                obj = to_intel_bo(drm_gem_object_lookup(dev, file,
                                                        exec[i].handle));
                if (&obj->base == NULL) {
                        DRM_DEBUG("Invalid object handle %d at index %d\n",
                                   exec[i].handle, i);
                        ret = -ENOENT;
                        goto err;
                }

                list_add_tail(&obj->exec_list, objects);
                obj->exec_handle = exec[i].handle;
                obj->exec_entry = &exec[i];
                eb_add_object(eb, obj);
        }

        ret = i915_gem_execbuffer_reserve(ring, file, objects);
        if (ret)
                goto err;

        list_for_each_entry(obj, objects, exec_list) {
                int offset = obj->exec_entry - exec;
                ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
                                                               reloc + reloc_offset[offset]);
                if (ret)
                        goto err;
        }

        /* Leave the user relocations as they are, this is the painfully slow
         * path, and we want to avoid the complication of dropping the lock
         * whilst having buffers reserved in the aperture and so causing
         * spurious ENOSPC for random operations.
         */

err:
        drm_free_large(reloc);
        drm_free_large(reloc_offset);
        return ret;
}

static int
i915_gem_execbuffer_flush(struct drm_device *dev,
                          uint32_t invalidate_domains,
                          uint32_t flush_domains,
                          uint32_t flush_rings)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i, ret;

        if (flush_domains & I915_GEM_DOMAIN_CPU)
                intel_gtt_chipset_flush();

        if (flush_domains & I915_GEM_DOMAIN_GTT)
                wmb();

        if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
                for (i = 0; i < I915_NUM_RINGS; i++)
                        if (flush_rings & (1 << i)) {
                                ret = i915_gem_flush_ring(&dev_priv->ring[i],
                                                          invalidate_domains,
                                                          flush_domains);
                                if (ret)
                                        return ret;
                        }
        }

        return 0;
}

static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
        u32 plane, flip_mask;
        int ret;

        /* Check for any pending flips. As we only maintain a flip queue depth
         * of 1, we can simply insert a WAIT for the next display flip prior
         * to executing the batch and avoid stalling the CPU.
         */

        for (plane = 0; flips >> plane; plane++) {
                if (((flips >> plane) & 1) == 0)
                        continue;

                if (plane)
                        flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
                else
                        flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;

                ret = intel_ring_begin(ring, 2);
                if (ret)
                        return ret;

                intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
                intel_ring_emit(ring, MI_NOOP);
                intel_ring_advance(ring);
        }

        return 0;
}

static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
                                struct list_head *objects)
{
        struct drm_i915_gem_object *obj;
        struct change_domains cd;
        int ret;

        memset(&cd, 0, sizeof(cd));
        list_for_each_entry(obj, objects, exec_list)
                i915_gem_object_set_to_gpu_domain(obj, ring, &cd);

        if (cd.invalidate_domains | cd.flush_domains) {
                ret = i915_gem_execbuffer_flush(ring->dev,
                                                cd.invalidate_domains,
                                                cd.flush_domains,
                                                cd.flush_rings);
                if (ret)
                        return ret;
        }

        if (cd.flips) {
                ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
                if (ret)
                        return ret;
        }

        list_for_each_entry(obj, objects, exec_list) {
                ret = i915_gem_object_sync(obj, ring);
                if (ret)
                        return ret;
        }

        return 0;
}

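/*
 * Both the batch start offset and the batch length must be qword (8 byte)
 * aligned before they can be handed to the ring.
 */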
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
        return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

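/*
 * Sanity-check the relocation lists before taking the mutex: guard against
 * multiplication overflow when computing the list length, and verify that
 * every list is readable (and writable, for the presumed-offset write-back)
 * before committing to the relocation pass.
 */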
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
                   int count)
{
        int i;

        for (i = 0; i < count; i++) {
                char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
                int length; /* limited by fault_in_pages_readable() */

                /* First check for malicious input causing overflow */
                if (exec[i].relocation_count >
                    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
                        return -EINVAL;

                length = exec[i].relocation_count *
                        sizeof(struct drm_i915_gem_relocation_entry);
                if (!access_ok(VERIFY_READ, ptr, length))
                        return -EFAULT;

                /* we may also need to update the presumed offsets */
                if (!access_ok(VERIFY_WRITE, ptr, length))
                        return -EFAULT;

                if (fault_in_multipages_readable(ptr, length))
                        return -EFAULT;
        }

        return 0;
}

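/*
 * Commit the pending domain changes and mark every object as active on
 * the ring for this request, so retirement and eviction know the GPU may
 * still be reading or writing them.
 */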
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
                                   struct intel_ring_buffer *ring,
                                   u32 seqno)
{
        struct drm_i915_gem_object *obj;

        list_for_each_entry(obj, objects, exec_list) {
                u32 old_read = obj->base.read_domains;
                u32 old_write = obj->base.write_domain;

                obj->base.read_domains = obj->base.pending_read_domains;
                obj->base.write_domain = obj->base.pending_write_domain;
                obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

                i915_gem_object_move_to_active(obj, ring, seqno);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
                        obj->pending_gpu_write = true;
                        list_move_tail(&obj->gpu_write_list,
                                       &ring->gpu_write_list);
                        if (obj->pin_count) /* check for potential scanout */
                                intel_mark_busy(ring->dev, obj);
                }

                trace_i915_gem_object_change_domain(obj, old_read, old_write);
        }

        intel_mark_busy(ring->dev, NULL);
}

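/*
 * Flush the batch's writes and add a request ("breadcrumb") so the
 * completion of this execbuffer can be tracked.
 */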
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
                                    struct drm_file *file,
                                    struct intel_ring_buffer *ring)
{
        struct drm_i915_gem_request *request;
        u32 invalidate;

        /*
         * Ensure that the commands in the batch buffer are
         * finished before the interrupt fires.
         *
         * The sampler always gets flushed on i965 (sigh).
         */
        invalidate = I915_GEM_DOMAIN_COMMAND;
        if (INTEL_INFO(dev)->gen >= 4)
                invalidate |= I915_GEM_DOMAIN_SAMPLER;
        if (ring->flush(ring, invalidate, 0)) {
                i915_gem_next_request_seqno(ring);
                return;
        }

        /* Add a breadcrumb for the completion of the batch buffer */
        request = kzalloc(sizeof(*request), GFP_KERNEL);
        if (request == NULL || i915_add_request(ring, file, request)) {
                i915_gem_next_request_seqno(ring);
                kfree(request);
        }
}

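/*
 * Userspace can ask (via I915_EXEC_GEN7_SOL_RESET) for the gen7 streamout
 * write offsets to be cleared before the batch runs; emit the four
 * register writes on the render ring.
 */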
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
                            struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret, i;

        if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
                return 0;

        ret = intel_ring_begin(ring, 4 * 3);
        if (ret)
                return ret;

        for (i = 0; i < 4; i++) {
                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
                intel_ring_emit(ring, 0);
        }

        intel_ring_advance(ring);

        return 0;
}

static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
                       struct drm_i915_gem_execbuffer2 *args,
                       struct drm_i915_gem_exec_object2 *exec)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head objects;
        struct eb_objects *eb;
        struct drm_i915_gem_object *batch_obj;
        struct drm_clip_rect *cliprects = NULL;
        struct intel_ring_buffer *ring;
        u32 exec_start, exec_len;
        u32 seqno;
        u32 mask;
        int ret, mode, i;

        if (!i915_gem_check_execbuffer(args)) {
                DRM_DEBUG("execbuf with invalid offset/length\n");
                return -EINVAL;
        }

        ret = validate_exec_list(exec, args->buffer_count);
        if (ret)
                return ret;

        switch (args->flags & I915_EXEC_RING_MASK) {
        case I915_EXEC_DEFAULT:
        case I915_EXEC_RENDER:
                ring = &dev_priv->ring[RCS];
                break;
        case I915_EXEC_BSD:
                ring = &dev_priv->ring[VCS];
                break;
        case I915_EXEC_BLT:
                ring = &dev_priv->ring[BCS];
                break;
        default:
                DRM_DEBUG("execbuf with unknown ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                return -EINVAL;
        }
        if (!intel_ring_initialized(ring)) {
                DRM_DEBUG("execbuf with invalid ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                return -EINVAL;
        }

        mode = args->flags & I915_EXEC_CONSTANTS_MASK;
        mask = I915_EXEC_CONSTANTS_MASK;
        switch (mode) {
        case I915_EXEC_CONSTANTS_REL_GENERAL:
        case I915_EXEC_CONSTANTS_ABSOLUTE:
        case I915_EXEC_CONSTANTS_REL_SURFACE:
                if (ring == &dev_priv->ring[RCS] &&
                    mode != dev_priv->relative_constants_mode) {
                        if (INTEL_INFO(dev)->gen < 4)
                                return -EINVAL;

                        if (INTEL_INFO(dev)->gen > 5 &&
                            mode == I915_EXEC_CONSTANTS_REL_SURFACE)
                                return -EINVAL;

                        /* The HW changed the meaning of this bit on gen6 */
                        if (INTEL_INFO(dev)->gen >= 6)
                                mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
                }
                break;
        default:
                DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
                return -EINVAL;
        }

        if (args->buffer_count < 1) {
                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        if (args->num_cliprects != 0) {
                if (ring != &dev_priv->ring[RCS]) {
                        DRM_DEBUG("clip rectangles are only valid with the render ring\n");
                        return -EINVAL;
                }

                if (INTEL_INFO(dev)->gen >= 5) {
                        DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
                        return -EINVAL;
                }

                if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
                        DRM_DEBUG("execbuf with %u cliprects\n",
                                  args->num_cliprects);
                        return -EINVAL;
                }

                cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
                                    GFP_KERNEL);
                if (cliprects == NULL) {
                        ret = -ENOMEM;
                        goto pre_mutex_err;
                }

                if (copy_from_user(cliprects,
                                     (struct drm_clip_rect __user *)(uintptr_t)
                                     args->cliprects_ptr,
                                     sizeof(*cliprects)*args->num_cliprects)) {
                        ret = -EFAULT;
                        goto pre_mutex_err;
                }
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto pre_mutex_err;

        if (dev_priv->mm.suspended) {
                mutex_unlock(&dev->struct_mutex);
                ret = -EBUSY;
                goto pre_mutex_err;
        }

        eb = eb_create(args->buffer_count);
        if (eb == NULL) {
                mutex_unlock(&dev->struct_mutex);
                ret = -ENOMEM;
                goto pre_mutex_err;
        }

        /* Look up object handles */
        INIT_LIST_HEAD(&objects);
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_i915_gem_object *obj;

                obj = to_intel_bo(drm_gem_object_lookup(dev, file,
                                                        exec[i].handle));
                if (&obj->base == NULL) {
                        DRM_DEBUG("Invalid object handle %d at index %d\n",
                                   exec[i].handle, i);
                        /* prevent error path from reading uninitialized data */
                        ret = -ENOENT;
                        goto err;
                }

                if (!list_empty(&obj->exec_list)) {
                        DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
                                   obj, exec[i].handle, i);
                        ret = -EINVAL;
                        goto err;
                }

                list_add_tail(&obj->exec_list, &objects);
                obj->exec_handle = exec[i].handle;
                obj->exec_entry = &exec[i];
                eb_add_object(eb, obj);
        }

        /* take note of the batch buffer before we might reorder the lists */
        batch_obj = list_entry(objects.prev,
                               struct drm_i915_gem_object,
                               exec_list);

        /* Move the objects en-masse into the GTT, evicting if necessary. */
        ret = i915_gem_execbuffer_reserve(ring, file, &objects);
        if (ret)
                goto err;

        /* The objects are in their final locations, apply the relocations. */
        ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
        if (ret) {
                if (ret == -EFAULT) {
                        ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
                                                                &objects, eb,
                                                                exec,
                                                                args->buffer_count);
                        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
                }
                if (ret)
                        goto err;
        }

        /* Set the pending read domains for the batch buffer to COMMAND */
        if (batch_obj->base.pending_write_domain) {
                DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
                ret = -EINVAL;
                goto err;
        }
        batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

        ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
        if (ret)
                goto err;

        seqno = i915_gem_next_request_seqno(ring);
        for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
                if (seqno < ring->sync_seqno[i]) {
                        /* The GPU cannot handle its semaphore value wrapping,
                         * so every billion or so execbuffers, we need to stall
                         * the GPU in order to reset the counters.
                         */
                        ret = i915_gpu_idle(dev);
                        if (ret)
                                goto err;
                        i915_gem_retire_requests(dev);

                        BUG_ON(ring->sync_seqno[i]);
                }
        }

        if (ring == &dev_priv->ring[RCS] &&
            mode != dev_priv->relative_constants_mode) {
                ret = intel_ring_begin(ring, 4);
                if (ret)
                        goto err;

                intel_ring_emit(ring, MI_NOOP);
                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                intel_ring_emit(ring, INSTPM);
                intel_ring_emit(ring, mask << 16 | mode);
                intel_ring_advance(ring);

                dev_priv->relative_constants_mode = mode;
        }

        if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
                ret = i915_reset_gen7_sol_offsets(dev, ring);
                if (ret)
                        goto err;
        }

        trace_i915_gem_ring_dispatch(ring, seqno);

        exec_start = batch_obj->gtt_offset + args->batch_start_offset;
        exec_len = args->batch_len;
        if (cliprects) {
                for (i = 0; i < args->num_cliprects; i++) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            args->DR1, args->DR4);
                        if (ret)
                                goto err;

                        ret = ring->dispatch_execbuffer(ring,
                                                        exec_start, exec_len);
                        if (ret)
                                goto err;
                }
        } else {
                ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
                if (ret)
                        goto err;
        }

        i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
        i915_gem_execbuffer_retire_commands(dev, file, ring);

err:
        eb_destroy(eb);
        while (!list_empty(&objects)) {
                struct drm_i915_gem_object *obj;

                obj = list_first_entry(&objects,
                                       struct drm_i915_gem_object,
                                       exec_list);
                list_del_init(&obj->exec_list);
                drm_gem_object_unreference(&obj->base);
        }

        mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
        kfree(cliprects);
        return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
        struct drm_i915_gem_execbuffer *args = data;
        struct drm_i915_gem_execbuffer2 exec2;
        struct drm_i915_gem_exec_object *exec_list = NULL;
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret, i;

        if (args->buffer_count < 1) {
                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        /* Copy in the exec list from userland */
        exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
        exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
        if (exec_list == NULL || exec2_list == NULL) {
                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
                drm_free_large(exec_list);
                drm_free_large(exec2_list);
                return -ENOMEM;
        }
        ret = copy_from_user(exec_list,
                             (struct drm_i915_relocation_entry __user *)
                             (uintptr_t) args->buffers_ptr,
                             sizeof(*exec_list) * args->buffer_count);
        if (ret != 0) {
                DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                drm_free_large(exec_list);
                drm_free_large(exec2_list);
                return -EFAULT;
        }

        for (i = 0; i < args->buffer_count; i++) {
                exec2_list[i].handle = exec_list[i].handle;
                exec2_list[i].relocation_count = exec_list[i].relocation_count;
                exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
                exec2_list[i].alignment = exec_list[i].alignment;
                exec2_list[i].offset = exec_list[i].offset;
                if (INTEL_INFO(dev)->gen < 4)
                        exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
                else
                        exec2_list[i].flags = 0;
        }

        exec2.buffers_ptr = args->buffers_ptr;
        exec2.buffer_count = args->buffer_count;
        exec2.batch_start_offset = args->batch_start_offset;
        exec2.batch_len = args->batch_len;
        exec2.DR1 = args->DR1;
        exec2.DR4 = args->DR4;
        exec2.num_cliprects = args->num_cliprects;
        exec2.cliprects_ptr = args->cliprects_ptr;
        exec2.flags = I915_EXEC_RENDER;

        ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                for (i = 0; i < args->buffer_count; i++)
                        exec_list[i].offset = exec2_list[i].offset;
                /* ... and back out to userspace */
                ret = copy_to_user((struct drm_i915_relocation_entry __user *)
                                   (uintptr_t) args->buffers_ptr,
                                   exec_list,
                                   sizeof(*exec_list) * args->buffer_count);
                if (ret) {
                        ret = -EFAULT;
                        DRM_DEBUG("failed to copy %d exec entries "
                                  "back to user (%d)\n",
                                  args->buffer_count, ret);
                }
        }

        drm_free_large(exec_list);
        drm_free_large(exec2_list);
        return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_execbuffer2 *args = data;
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret;

        if (args->buffer_count < 1 ||
            args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
                DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
                             GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
        if (exec2_list == NULL)
                exec2_list = drm_malloc_ab(sizeof(*exec2_list),
                                           args->buffer_count);
        if (exec2_list == NULL) {
                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
                return -ENOMEM;
        }
        ret = copy_from_user(exec2_list,
                             (struct drm_i915_relocation_entry __user *)
                             (uintptr_t) args->buffers_ptr,
                             sizeof(*exec2_list) * args->buffer_count);
        if (ret != 0) {
                DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                drm_free_large(exec2_list);
                return -EFAULT;
        }

        ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                ret = copy_to_user((struct drm_i915_relocation_entry __user *)
                                   (uintptr_t) args->buffers_ptr,
                                   exec2_list,
                                   sizeof(*exec2_list) * args->buffer_count);
                if (ret) {
                        ret = -EFAULT;
                        DRM_DEBUG("failed to copy %d exec entries "
                                  "back to user (%d)\n",
                                  args->buffer_count, ret);
                }
        }

        drm_free_large(exec2_list);
        return ret;
}