/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/dma-buf.h>
#include <nouveau_drm.h>

#include <subdev/fb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
        return 0;
}

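/* GEM object destructor: drop any pin left behind, tear down the prime
 * attachment if the object was imported, and release the backing TTM
 * buffer object.
 */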
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
        struct nouveau_bo *nvbo = gem->driver_private;
        struct ttm_buffer_object *bo;

        if (!nvbo)
                return;
        bo = &nvbo->bo;
        nvbo->gem = NULL;

        if (unlikely(nvbo->pin_refcnt)) {
                nvbo->pin_refcnt = 1;
                nouveau_bo_unpin(nvbo);
        }

        if (gem->import_attach)
                drm_prime_gem_destroy(gem, nvbo->bo.sg);

        ttm_bo_unref(&bo);

        drm_gem_object_release(gem);
        kfree(gem);
}

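/* Called whenever a client opens a handle to the object: if the client has
 * a per-client VM, find its mapping of the buffer or create one, bumping
 * the refcount if it already exists.
 */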
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_vma *vma;
        int ret;

        if (!cli->base.vm)
                return 0;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return ret;

        vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
        if (!vma) {
                vma = kzalloc(sizeof(*vma), GFP_KERNEL);
                if (!vma) {
                        ret = -ENOMEM;
                        goto out;
                }

                ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
                if (ret) {
                        kfree(vma);
                        goto out;
                }
        } else {
                vma->refcount++;
        }

out:
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

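/* Counterpart to nouveau_gem_object_open(): drop the client's reference on
 * its mapping, and unmap the buffer from the client's VM when it hits zero.
 */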
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_vma *vma;
        int ret;

        if (!cli->base.vm)
                return;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return;

        vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
        if (vma) {
                if (--vma->refcount == 0) {
                        nouveau_bo_vma_del(nvbo, vma);
                        kfree(vma);
                }
        }
        ttm_bo_unreserve(&nvbo->bo);
}

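/* Allocate a new buffer object with the requested placement and tiling, and
 * wrap it in a GEM object so that userspace can obtain a handle to it.
 */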
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
                uint32_t tile_mode, uint32_t tile_flags,
                struct nouveau_bo **pnvbo)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_bo *nvbo;
        u32 flags = 0;
        int ret;

        if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
                flags |= TTM_PL_FLAG_VRAM;
        if (domain & NOUVEAU_GEM_DOMAIN_GART)
                flags |= TTM_PL_FLAG_TT;
        if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
                flags |= TTM_PL_FLAG_SYSTEM;

        ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
                             tile_flags, NULL, pnvbo);
        if (ret)
                return ret;
        nvbo = *pnvbo;

        /* we restrict allowed domains on nv50+ to only the types
         * that were requested at creation time.  not possible on
         * earlier chips without busting the ABI.
         */
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
                              NOUVEAU_GEM_DOMAIN_GART;
        if (nv_device(drm->device)->card_type >= NV_50)
                nvbo->valid_domains &= domain;

        nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
        if (!nvbo->gem) {
                nouveau_bo_ref(NULL, pnvbo);
                return -ENOMEM;
        }

        nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
        nvbo->gem->driver_private = nvbo;
        return 0;
}

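/* Fill in the userspace info structure for a buffer: current domain, GPU
 * offset (the per-client VMA offset when a VM is in use), size, mmap handle
 * and tiling state.
 */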
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
                 struct drm_nouveau_gem_info *rep)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_vma *vma;

        if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                rep->domain = NOUVEAU_GEM_DOMAIN_GART;
        else
                rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

        rep->offset = nvbo->bo.offset;
        if (cli->base.vm) {
                vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
                if (!vma)
                        return -EINVAL;

                rep->offset = vma->offset;
        }

        rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
        rep->map_handle = nvbo->bo.addr_space_offset;
        rep->tile_mode = nvbo->tile_mode;
        rep->tile_flags = nvbo->tile_flags;
        return 0;
}

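/* DRM_NOUVEAU_GEM_NEW: validate the requested tiling flags, allocate the
 * buffer object and return a handle plus its info to userspace.  The handle
 * holds the only reference once the allocation reference is dropped.
 */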
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_fb *pfb = nouveau_fb(drm->device);
        struct drm_nouveau_gem_new *req = data;
        struct nouveau_bo *nvbo = NULL;
        int ret = 0;

        drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;

        if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
                NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags);
                return -EINVAL;
        }

        ret = nouveau_gem_new(dev, req->info.size, req->align,
                              req->info.domain, req->info.tile_mode,
                              req->info.tile_flags, &nvbo);
        if (ret)
                return ret;

        ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
        if (ret == 0) {
                ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
                if (ret)
                        drm_gem_handle_delete(file_priv, req->info.handle);
        }

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(nvbo->gem);
        return ret;
}

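/* Work out the TTM placement to validate a pushbuf buffer into, based on the
 * domains userspace asked for and the domains the buffer may legally live in.
 * Prefer the memory type the buffer already occupies to avoid needless moves.
 */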
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
                       uint32_t write_domains, uint32_t valid_domains)
{
        struct nouveau_bo *nvbo = gem->driver_private;
        struct ttm_buffer_object *bo = &nvbo->bo;
        uint32_t domains = valid_domains & nvbo->valid_domains &
                (write_domains ? write_domains : read_domains);
        uint32_t pref_flags = 0, valid_flags = 0;

        if (!domains)
                return -EINVAL;

        if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                valid_flags |= TTM_PL_FLAG_VRAM;

        if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                valid_flags |= TTM_PL_FLAG_TT;

        if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
            bo->mem.mem_type == TTM_PL_VRAM)
                pref_flags |= TTM_PL_FLAG_VRAM;

        else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
                 bo->mem.mem_type == TTM_PL_TT)
                pref_flags |= TTM_PL_FLAG_TT;

        else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
                pref_flags |= TTM_PL_FLAG_VRAM;

        else
                pref_flags |= TTM_PL_FLAG_TT;

        nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

        return 0;
}

struct validate_op {
        struct list_head vram_list;
        struct list_head gart_list;
        struct list_head both_list;
};

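/* Attach the submission fence (if any) to every buffer on the list, then
 * unmap, unreserve and drop the reference taken during validate_init().
 */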
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
        struct list_head *entry, *tmp;
        struct nouveau_bo *nvbo;

        list_for_each_safe(entry, tmp, list) {
                nvbo = list_entry(entry, struct nouveau_bo, entry);

                nouveau_bo_fence(nvbo, fence);

                if (unlikely(nvbo->validate_mapped)) {
                        ttm_bo_kunmap(&nvbo->kmap);
                        nvbo->validate_mapped = false;
                }

                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve(&nvbo->bo);
                drm_gem_object_unreference_unlocked(nvbo->gem);
        }
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
        validate_fini_list(&op->vram_list, fence);
        validate_fini_list(&op->gart_list, fence);
        validate_fini_list(&op->both_list, fence);
}

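/* Look up and reserve every buffer referenced by the pushbuf, sorting each
 * one onto the VRAM, GART or "either" list according to its valid domains.
 * Reservation conflicts are resolved by backing off and retrying with the
 * validation sequence number.
 */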
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
              struct drm_nouveau_gem_pushbuf_bo *pbbo,
              int nr_buffers, struct validate_op *op)
{
        struct drm_device *dev = chan->drm->dev;
        struct nouveau_drm *drm = nouveau_drm(dev);
        uint32_t sequence;
        int trycnt = 0;
        int ret, i;

        sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
retry:
        if (++trycnt > 100000) {
                NV_ERROR(drm, "%s failed and gave up.\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < nr_buffers; i++) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
                struct drm_gem_object *gem;
                struct nouveau_bo *nvbo;

                gem = drm_gem_object_lookup(dev, file_priv, b->handle);
                if (!gem) {
                        NV_ERROR(drm, "Unknown handle 0x%08x\n", b->handle);
                        validate_fini(op, NULL);
                        return -ENOENT;
                }
                nvbo = gem->driver_private;

                if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
                        NV_ERROR(drm, "multiple instances of buffer %d on "
                                      "validation list\n", b->handle);
                        drm_gem_object_unreference_unlocked(gem);
                        validate_fini(op, NULL);
                        return -EINVAL;
                }

                ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
                if (ret) {
                        validate_fini(op, NULL);
                        if (unlikely(ret == -EAGAIN))
                                ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
                        drm_gem_object_unreference_unlocked(gem);
                        if (unlikely(ret)) {
                                if (ret != -ERESTARTSYS)
                                        NV_ERROR(drm, "fail reserve\n");
                                return ret;
                        }
                        goto retry;
                }

                b->user_priv = (uint64_t)(unsigned long)nvbo;
                nvbo->reserved_by = file_priv;
                nvbo->pbbo_index = i;
                if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
                    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
                        list_add_tail(&nvbo->entry, &op->both_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                        list_add_tail(&nvbo->entry, &op->vram_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                        list_add_tail(&nvbo->entry, &op->gart_list);
                else {
                        NV_ERROR(drm, "invalid valid domains: 0x%08x\n",
                                 b->valid_domains);
                        list_add_tail(&nvbo->entry, &op->both_list);
                        validate_fini(op, NULL);
                        return -EINVAL;
                }
        }

        return 0;
}

static int
validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
{
        struct nouveau_fence *fence = NULL;
        int ret = 0;

        spin_lock(&nvbo->bo.bdev->fence_lock);
        if (nvbo->bo.sync_obj)
                fence = nouveau_fence_ref(nvbo->bo.sync_obj);
        spin_unlock(&nvbo->bo.bdev->fence_lock);

        if (fence) {
                ret = nouveau_fence_sync(fence, chan);
                nouveau_fence_unref(&fence);
        }

        return ret;
}

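/* Validate each reserved buffer into an acceptable placement, syncing the
 * channel against any outstanding fence before and after the move.  On
 * pre-NV50 chips, update userspace's presumed offset/domain when they turn
 * out to be stale; the return value is the number of such updates.
 */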
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
              struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
        struct nouveau_drm *drm = chan->drm;
        struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
                                (void __force __user *)(uintptr_t)user_pbbo_ptr;
        struct nouveau_bo *nvbo;
        int ret, relocs = 0;

        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

                ret = validate_sync(chan, nvbo);
                if (unlikely(ret)) {
                        NV_ERROR(drm, "fail pre-validate sync\n");
                        return ret;
                }

                ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
                if (unlikely(ret)) {
                        NV_ERROR(drm, "fail set_domain\n");
                        return ret;
                }

                ret = nouveau_bo_validate(nvbo, true, false, false);
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_ERROR(drm, "fail ttm_validate\n");
                        return ret;
                }

                ret = validate_sync(chan, nvbo);
                if (unlikely(ret)) {
                        NV_ERROR(drm, "fail post-validate sync\n");
                        return ret;
                }

                if (nv_device(drm->device)->card_type < NV_50) {
                        if (nvbo->bo.offset == b->presumed.offset &&
                            ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
                              b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
                             (nvbo->bo.mem.mem_type == TTM_PL_TT &&
                              b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
                                continue;

                        if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                                b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
                        else
                                b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
                        b->presumed.offset = nvbo->bo.offset;
                        b->presumed.valid = 0;
                        relocs++;

                        if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
                                             &b->presumed, sizeof(b->presumed)))
                                return -EFAULT;
                }
        }

        return relocs;
}

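/* Reserve and validate the full buffer list for a pushbuf submission,
 * accumulating the number of buffers whose relocations must be re-applied.
 */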
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
                             struct drm_file *file_priv,
                             struct drm_nouveau_gem_pushbuf_bo *pbbo,
                             uint64_t user_buffers, int nr_buffers,
                             struct validate_op *op, int *apply_relocs)
{
        struct nouveau_drm *drm = chan->drm;
        int ret, relocs = 0;

        INIT_LIST_HEAD(&op->vram_list);
        INIT_LIST_HEAD(&op->gart_list);
        INIT_LIST_HEAD(&op->both_list);

        if (nr_buffers == 0)
                return 0;

        ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
        if (unlikely(ret)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(drm, "validate_init\n");
                return ret;
        }

        ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(drm, "validate vram_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(drm, "validate gart_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(drm, "validate both_list\n");
                validate_fini(op, NULL);
                return ret;
        }
        relocs += ret;

        *apply_relocs = relocs;
        return 0;
}

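/* Copy a userspace array of nmemb elements of the given size into a freshly
 * allocated kernel buffer.  Callers are expected to bound nmemb against the
 * NOUVEAU_GEM_MAX_* limits beforehand so the multiplication cannot overflow.
 */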
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
        void *mem;
        void __user *userptr = (void __force __user *)(uintptr_t)user;

        mem = kmalloc(nmemb * size, GFP_KERNEL);
        if (!mem)
                return ERR_PTR(-ENOMEM);

        if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
                kfree(mem);
                return ERR_PTR(-EFAULT);
        }

        return mem;
}

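/* Patch pushbuf contents whose presumed buffer offsets were invalidated
 * during validation: recompute each relocation and write it into the target
 * buffer through a kernel mapping, waiting for the buffer to go idle first.
 */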
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
                                struct drm_nouveau_gem_pushbuf *req,
                                struct drm_nouveau_gem_pushbuf_bo *bo)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
        int ret = 0;
        unsigned i;

        reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
        if (IS_ERR(reloc))
                return PTR_ERR(reloc);

        for (i = 0; i < req->nr_relocs; i++) {
                struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
                struct drm_nouveau_gem_pushbuf_bo *b;
                struct nouveau_bo *nvbo;
                uint32_t data;

                if (unlikely(r->bo_index >= req->nr_buffers)) {
                        NV_ERROR(drm, "reloc bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }

                b = &bo[r->bo_index];
                if (b->presumed.valid)
                        continue;

                if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
                        NV_ERROR(drm, "reloc container bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }
                nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

                if (unlikely(r->reloc_bo_offset + 4 >
                             nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
                        NV_ERROR(drm, "reloc outside of bo\n");
                        ret = -EINVAL;
                        break;
                }

                if (!nvbo->kmap.virtual) {
                        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
                                          &nvbo->kmap);
                        if (ret) {
                                NV_ERROR(drm, "failed kmap for reloc\n");
                                break;
                        }
                        nvbo->validate_mapped = true;
                }

                if (r->flags & NOUVEAU_GEM_RELOC_LOW)
                        data = b->presumed.offset + r->data;
                else
                if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
                        data = (b->presumed.offset + r->data) >> 32;
                else
                        data = r->data;

                if (r->flags & NOUVEAU_GEM_RELOC_OR) {
                        if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
                                data |= r->tor;
                        else
                                data |= r->vor;
                }

                spin_lock(&nvbo->bo.bdev->fence_lock);
                ret = ttm_bo_wait(&nvbo->bo, false, false, false);
                spin_unlock(&nvbo->bo.bdev->fence_lock);
                if (ret) {
                        NV_ERROR(drm, "reloc wait_idle failed: %d\n", ret);
                        break;
                }

                nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
        }

        kfree(reloc);
        return ret;
}

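/* DRM_NOUVEAU_GEM_PUSHBUF: the main command submission ioctl.  Find the
 * target channel, validate the buffer list, apply relocations if required,
 * then queue the push buffers using whichever submission method the chipset
 * supports (IB ring, call, or jump) and fence the submission.
 */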
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
        struct nouveau_abi16_chan *temp;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_nouveau_gem_pushbuf *req = data;
        struct drm_nouveau_gem_pushbuf_push *push;
        struct drm_nouveau_gem_pushbuf_bo *bo;
        struct nouveau_channel *chan = NULL;
        struct validate_op op;
        struct nouveau_fence *fence = NULL;
        int i, j, ret = 0, do_reloc = 0;

        if (unlikely(!abi16))
                return -ENOMEM;

        list_for_each_entry(temp, &abi16->channels, head) {
                if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
                        chan = temp->chan;
                        break;
                }
        }

        if (!chan)
                return nouveau_abi16_put(abi16, -ENOENT);

        req->vram_available = drm->gem.vram_available;
        req->gart_available = drm->gem.gart_available;
        if (unlikely(req->nr_push == 0))
                goto out_next;

        if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
                NV_ERROR(drm, "pushbuf push count exceeds limit: %d max %d\n",
                         req->nr_push, NOUVEAU_GEM_MAX_PUSH);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
                NV_ERROR(drm, "pushbuf bo count exceeds limit: %d max %d\n",
                         req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
                NV_ERROR(drm, "pushbuf reloc count exceeds limit: %d max %d\n",
                         req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        push = u_memcpya(req->push, req->nr_push, sizeof(*push));
        if (IS_ERR(push))
                return nouveau_abi16_put(abi16, PTR_ERR(push));

        bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
        if (IS_ERR(bo)) {
                kfree(push);
                return nouveau_abi16_put(abi16, PTR_ERR(bo));
        }

        /* Ensure all push buffers are on validate list */
        for (i = 0; i < req->nr_push; i++) {
                if (push[i].bo_index >= req->nr_buffers) {
                        NV_ERROR(drm, "push %d buffer not in list\n", i);
                        ret = -EINVAL;
                        goto out_prevalid;
                }
        }

        /* Validate buffer list */
        ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
                                           req->nr_buffers, &op, &do_reloc);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        NV_ERROR(drm, "validate: %d\n", ret);
                goto out_prevalid;
        }

        /* Apply any relocations that are required */
        if (do_reloc) {
                ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
                if (ret) {
                        NV_ERROR(drm, "reloc apply: %d\n", ret);
                        goto out;
                }
        }

        if (chan->dma.ib_max) {
                ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
                if (ret) {
                        NV_ERROR(drm, "nv50cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        nv50_dma_push(chan, nvbo, push[i].offset,
                                      push[i].length);
                }
        } else
        if (nv_device(drm->device)->chipset >= 0x25) {
                ret = RING_SPACE(chan, req->nr_push * 2);
                if (ret) {
                        NV_ERROR(drm, "cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
                        OUT_RING(chan, 0);
                }
        } else {
                ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
                if (ret) {
                        NV_ERROR(drm, "jmp_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        uint32_t cmd;

                        cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
                        cmd |= 0x20000000;
                        if (unlikely(cmd != req->suffix0)) {
                                if (!nvbo->kmap.virtual) {
                                        ret = ttm_bo_kmap(&nvbo->bo, 0,
                                                          nvbo->bo.mem.num_pages,
                                                          &nvbo->kmap);
                                        if (ret) {
                                                WIND_RING(chan);
                                                goto out;
                                        }
                                        nvbo->validate_mapped = true;
                                }

                                nouveau_bo_wr32(nvbo, (push[i].offset +
                                                push[i].length - 8) / 4, cmd);
                        }

                        OUT_RING(chan, 0x20000000 |
                                      (nvbo->bo.offset + push[i].offset));
                        OUT_RING(chan, 0);
                        for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
                                OUT_RING(chan, 0);
                }
        }

        ret = nouveau_fence_new(chan, &fence);
        if (ret) {
                NV_ERROR(drm, "error fencing pushbuf: %d\n", ret);
                WIND_RING(chan);
                goto out;
        }

out:
        validate_fini(&op, fence);
        nouveau_fence_unref(&fence);

out_prevalid:
        kfree(bo);
        kfree(push);

out_next:
        if (chan->dma.ib_max) {
                req->suffix0 = 0x00000000;
                req->suffix1 = 0x00000000;
        } else
        if (nv_device(drm->device)->chipset >= 0x25) {
                req->suffix0 = 0x00020000;
                req->suffix1 = 0x00000000;
        } else {
                req->suffix0 = 0x20000000 |
                              (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
                req->suffix1 = 0x00000000;
        }

        return nouveau_abi16_put(abi16, ret);
}

static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
        uint32_t flags = 0;

        if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
                flags |= TTM_PL_FLAG_VRAM;
        if (domain & NOUVEAU_GEM_DOMAIN_GART)
                flags |= TTM_PL_FLAG_TT;

        return flags;
}

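/* DRM_NOUVEAU_GEM_CPU_PREP: wait (or poll, with the NOWAIT flag) for GPU
 * accesses to a buffer to complete before the CPU touches its contents.
 */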
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_prep *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;
        bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
        int ret = -EINVAL;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        spin_lock(&nvbo->bo.bdev->fence_lock);
        ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
        spin_unlock(&nvbo->bo.bdev->fence_lock);
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_nouveau_gem_info *req = data;
        struct drm_gem_object *gem;
        int ret;

        gem = drm_gem_object_lookup(dev, file_priv, req->handle);
        if (!gem)
                return -ENOENT;

        ret = nouveau_gem_info(file_priv, gem, req);
        drm_gem_object_unreference_unlocked(gem);
        return ret;
}