/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user defined handle (CARD32). The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of 2 CARD32: the first contains
   the handle, the second a bitfield that contains the address of the
   object in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0  instance_addr >> 4
   17:16  engine (here uses 1 = graphics)
   28:24  channel id (here uses 0)
   31     valid (use 1)

   NV40:

   15: 0  instance_addr >> 4 (maybe 19:0)
   21:20  engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id;
   the hash itself is computed in nouveau_ramht.c.
*/
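
/*
 * Illustrative only, not part of the driver: the bitfield listing above,
 * restated as code for the pre-NV40 layout. The real entries are written
 * by the RAMHT code in nouveau_ramht.c; this helper exists purely as an
 * example of the packing.
 */
static inline uint32_t
example_nv04_ramht_word(uint32_t instance_addr, uint32_t engine,
                        uint32_t channel_id)
{
        return ((instance_addr >> 4) & 0xffff) | /* 15: 0 instance_addr >> 4 */
               ((engine & 0x3) << 16) |          /* 17:16 engine */
               ((channel_id & 0x1f) << 24) |     /* 28:24 channel id */
               (1U << 31);                       /* 31    valid */
}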
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
                   uint32_t size, int align, uint32_t flags,
                   struct nouveau_gpuobj **gpuobj_ret)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->engine;
        struct nouveau_gpuobj *gpuobj;
        struct drm_mm_node *ramin = NULL;
        int ret;

        NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
                 chan ? chan->id : -1, size, align, flags);

        if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
                return -EINVAL;
        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
                return -ENOMEM;
        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
        gpuobj->dev = dev;
        gpuobj->flags = flags;
        kref_init(&gpuobj->refcount);
        gpuobj->size = size;

        spin_lock(&dev_priv->ramin_lock);
        list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
        spin_unlock(&dev_priv->ramin_lock);
        if (chan) {
                NV_DEBUG(dev, "channel heap\n");

                ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
                if (ramin)
                        ramin = drm_mm_get_block(ramin, size, align);

                if (!ramin) {
                        nouveau_gpuobj_ref(NULL, &gpuobj);
                        return -ENOMEM;
                }
        } else {
                NV_DEBUG(dev, "global heap\n");

                /* allocate backing pages, sets vinst */
                ret = engine->instmem.populate(dev, gpuobj, &size);
                if (ret) {
                        nouveau_gpuobj_ref(NULL, &gpuobj);
                        return ret;
                }

                /* try and get aperture space */
                do {
                        if (drm_mm_pre_get(&dev_priv->ramin_heap))
                                return -ENOMEM;

                        spin_lock(&dev_priv->ramin_lock);
                        ramin = drm_mm_search_free(&dev_priv->ramin_heap, size,
                                                   align, 0);
                        if (ramin == NULL) {
                                spin_unlock(&dev_priv->ramin_lock);
                                nouveau_gpuobj_ref(NULL, &gpuobj);
                                return -ENOMEM;
                        }

                        ramin = drm_mm_get_block_atomic(ramin, size, align);
                        spin_unlock(&dev_priv->ramin_lock);
                } while (ramin == NULL);

                /* on nv50 it's ok to fail, we have a fallback path */
                if (!ramin && dev_priv->card_type < NV_50) {
                        nouveau_gpuobj_ref(NULL, &gpuobj);
                        return -ENOMEM;
                }
        }
        /* if we got a chunk of the aperture, map pages into it */
        gpuobj->im_pramin = ramin;
        if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) {
                ret = engine->instmem.bind(dev, gpuobj);
                if (ret) {
                        nouveau_gpuobj_ref(NULL, &gpuobj);
                        return ret;
                }
        }
        /* calculate the various different addresses for the object */
        if (chan) {
                gpuobj->pinst = chan->ramin->pinst;
                if (gpuobj->pinst != ~0)
                        gpuobj->pinst += gpuobj->im_pramin->start;

                if (dev_priv->card_type < NV_50) {
                        gpuobj->cinst = gpuobj->pinst;
                } else {
                        gpuobj->cinst = gpuobj->im_pramin->start;
                        gpuobj->vinst = gpuobj->im_pramin->start +
                                        chan->ramin->vinst;
                }
        } else {
                if (gpuobj->im_pramin)
                        gpuobj->pinst = gpuobj->im_pramin->start;
                else
                        gpuobj->pinst = ~0;
                gpuobj->cinst = 0xdeadbeef;
        }

        if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
                int i;

                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, 0);
                engine->instmem.flush(dev);
        }

        *gpuobj_ret = gpuobj;
        return 0;
}
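
/*
 * Usage sketch, hypothetical and for illustration only: allocate a 4KiB,
 * zero-initialised object in a channel's instmem heap, then drop the
 * creation reference again.
 */
static inline int
example_gpuobj_alloc(struct drm_device *dev, struct nouveau_channel *chan)
{
        struct nouveau_gpuobj *obj = NULL;
        int ret;

        ret = nouveau_gpuobj_new(dev, chan, 0x1000, 16,
                                 NVOBJ_FLAG_ZERO_ALLOC, &obj);
        if (ret)
                return ret;

        /* ... fill it in with nv_wo32(obj, offset, value) ... */

        nouveau_gpuobj_ref(NULL, &obj); /* release */
        return 0;
}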
int
nouveau_gpuobj_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        NV_DEBUG(dev, "\n");

        INIT_LIST_HEAD(&dev_priv->gpuobj_list);
        spin_lock_init(&dev_priv->ramin_lock);
        dev_priv->ramin_base = ~0;

        return 0;
}
void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        NV_DEBUG(dev, "\n");

        BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}
static void
nouveau_gpuobj_del(struct kref *ref)
{
        struct nouveau_gpuobj *gpuobj =
                container_of(ref, struct nouveau_gpuobj, refcount);
        struct drm_device *dev = gpuobj->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->engine;
        int i;

        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

        if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, 0);
                engine->instmem.flush(dev);
        }

        if (gpuobj->dtor)
                gpuobj->dtor(dev, gpuobj);

        if (gpuobj->im_backing)
                engine->instmem.clear(dev, gpuobj);

        spin_lock(&dev_priv->ramin_lock);
        if (gpuobj->im_pramin)
                drm_mm_put_block(gpuobj->im_pramin);
        list_del(&gpuobj->list);
        spin_unlock(&dev_priv->ramin_lock);

        kfree(gpuobj);
}
void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
        if (ref)
                kref_get(&ref->refcount);

        if (*ptr)
                kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

        *ptr = ref;
}
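
/*
 * Note (added for clarity): nouveau_gpuobj_ref() is an "assign with
 * reference counting" helper - it takes a reference on the new object (if
 * any), drops the reference held through *ptr (if any), and stores the new
 * pointer. Passing ref == NULL therefore just releases whatever *ptr
 * pointed at:
 *
 *      nouveau_gpuobj_ref(obj, &chan->some_obj);   // acquire
 *      nouveau_gpuobj_ref(NULL, &chan->some_obj);  // release
 *
 * ("some_obj" is a made-up field name, used purely for illustration.)
 */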
int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
                        u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = NULL;
        int i;

        NV_DEBUG(dev,
                 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
                 pinst, vinst, size, flags);

        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
                return -ENOMEM;
        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
        gpuobj->dev = dev;
        gpuobj->flags = flags;
        kref_init(&gpuobj->refcount);
        gpuobj->size  = size;
        gpuobj->pinst = pinst;
        gpuobj->cinst = 0xdeadbeef;
        gpuobj->vinst = vinst;

        if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, 0);
                dev_priv->engine.instmem.flush(dev);
        }

        spin_lock(&dev_priv->ramin_lock);
        list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
        spin_unlock(&dev_priv->ramin_lock);

        *pgpuobj = gpuobj;
        return 0;
}
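
/*
 * Note (added for clarity): a "fake" gpuobj wraps an already-existing
 * region of instance memory, given by pinst/vinst, instead of allocating
 * one, so that region can be accessed and refcounted like any other
 * object. The NV50 page directory set up in nouveau_gpuobj_channel_init()
 * below is created this way.
 */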
static int
nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        /*XXX: dodgy hack for now */
        if (dev_priv->card_type >= NV_50)
                return 24;
        if (dev_priv->card_type >= NV_40)
                return 32;
        return 16;
}
/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
   11:0  class (seems like I can always use 0 here)
   12    page table present?
   13    page entry linear?
   15:14 access: 0 rw, 1 ro, 2 wo
   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
   31:20 dma adjust (bits 0-11 of the address)
   entry[1]
   dma limit (size of transfer)
   entry[X]
   1     0 readonly, 1 readwrite
   31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
   page table terminator; the same value as the first PTE (which is also
   what NVIDIA does), rivatv uses 0xffffffff

   Non-linear page tables need a list of frame addresses afterwards;
   the rivatv project has some info on this.

   The method below creates a DMA object in instance RAM and returns a handle
   to it that can be used to set up context objects.
*/
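
/*
 * Illustrative only, not driver code: entry[0] of a DMA object packed
 * exactly as the pre-NV50 path of nouveau_gpuobj_dma_new() below does it,
 * with the "page table present" and "page entry linear" bits set.
 */
static inline uint32_t
example_nv04_dma_entry0(int class, int access, int target, uint32_t adjust)
{
        return (class & 0xfff) |  /* 11:0  class */
               (1 << 12) |        /* 12    page table present */
               (1 << 13) |        /* 13    page entry linear */
               (access << 14) |   /* 15:14 access */
               (target << 16) |   /* 17:16 target */
               (adjust << 20);    /* 31:20 dma adjust */
}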
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
                       uint64_t offset, uint64_t size, int access,
                       int target, struct nouveau_gpuobj **gpuobj)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
        int ret;

        NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
                 chan->id, class, offset, size);
        NV_DEBUG(dev, "access=%d target=%d\n", access, target);

        switch (target) {
        case NV_DMA_TARGET_AGP:
                offset += dev_priv->gart_info.aper_base;
                break;
        default:
                break;
        }

        ret = nouveau_gpuobj_new(dev, chan,
                                 nouveau_gpuobj_class_instmem_size(dev, class),
                                 16, NVOBJ_FLAG_ZERO_ALLOC |
                                 NVOBJ_FLAG_ZERO_FREE, gpuobj);
        if (ret) {
                NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
                return ret;
        }
        if (dev_priv->card_type < NV_50) {
                uint32_t frame, adjust, pte_flags = 0;

                if (access != NV_DMA_ACCESS_RO)
                        pte_flags |= (1 << 1);
                adjust = offset &  0x00000fff;
                frame  = offset & ~0x00000fff;

                nv_wo32(*gpuobj,  0, ((1<<12) | (1<<13) | (adjust << 20) |
                                      (access << 14) | (target << 16) |
                                      class));
                nv_wo32(*gpuobj,  4, size - 1);
                nv_wo32(*gpuobj,  8, frame | pte_flags);
                nv_wo32(*gpuobj, 12, frame | pte_flags);
        } else {
                uint64_t limit = offset + size - 1;
                uint32_t flags0, flags5;

                if (target == NV_DMA_TARGET_VIDMEM) {
                        flags0 = 0x00190000;
                        flags5 = 0x00010000;
                } else {
                        flags0 = 0x7fc00000;
                        flags5 = 0x00080000;
                }

                nv_wo32(*gpuobj,  0, flags0 | class);
                nv_wo32(*gpuobj,  4, lower_32_bits(limit));
                nv_wo32(*gpuobj,  8, lower_32_bits(offset));
                nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
                                      (upper_32_bits(offset) & 0xff));
                nv_wo32(*gpuobj, 20, flags5);
        }

        instmem->flush(dev);

        (*gpuobj)->engine = NVOBJ_ENGINE_SW;
        (*gpuobj)->class  = class;
        return 0;
}
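
/*
 * Usage sketch, hypothetical: a read/write DMA object covering all of
 * VRAM, equivalent to what nouveau_gpuobj_channel_init() below creates
 * for the vram_h handle on pre-NV50 chips.
 */
static inline int
example_vram_ctxdma(struct nouveau_channel *chan, struct nouveau_gpuobj **dma)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;

        return nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
                                      dev_priv->fb_available_size,
                                      NV_DMA_ACCESS_RW,
                                      NV_DMA_TARGET_VIDMEM, dma);
}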
int
nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
                            uint64_t offset, uint64_t size, int access,
                            struct nouveau_gpuobj **gpuobj,
                            uint32_t *o_ret)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int ret;

        if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
            (dev_priv->card_type >= NV_50 &&
             dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             offset + dev_priv->vm_gart_base,
                                             size, access, NV_DMA_TARGET_AGP,
                                             gpuobj);
                if (o_ret)
                        *o_ret = 0;
        } else
        if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
                nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
                if (offset & ~0xffffffffULL) {
                        NV_ERROR(dev, "obj offset exceeds 32-bits\n");
                        return -EINVAL;
                }
                if (o_ret)
                        *o_ret = (uint32_t)offset;
                ret = (*gpuobj != NULL) ? 0 : -EINVAL;
        } else {
                NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
                ret = -EINVAL;
        }

        return ret;
}
/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.

   NV4 - NV30:

   entry[0]
   11:0  class
   17:15 patch config:
         scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
   18    synchronize enable
   19    endian: 1 big, 0 little
   23    single step enable
   24    patch status: 0 invalid, 1 valid
   25    context_surface 0: 1 valid
   26    context surface 1: 1 valid
   27    context pattern: 1 valid
   28    context rop: 1 valid
   29,30 context beta, beta4
   entry[1]
   31:16 notify instance address
   entry[2]
   15:0  dma 0 instance address
   31:16 dma 1 instance address

   NV40:

   No idea what the exact format is. Here's what can be deduced:

   entry[0]:
   11:0  class (maybe uses more bits here?)
   25    patch status valid ?
   entry[1]:
   15:0  DMA notifier (maybe 20:0)
   entry[2]:
   15:0  DMA 0 instance (maybe 20:0)
   entry[3]:
   15:0  DMA 1 instance (maybe 20:0)
*/
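
/*
 * Illustrative only, not driver code: entry[0] of a pre-NV40 context
 * object per the listing above. nouveau_gpuobj_gr_new() below writes
 * exactly class | 0x00080000 (bit 19, big endian) on NV30 and earlier
 * when the CPU is big-endian, and plain class otherwise.
 */
static inline uint32_t
example_nv04_ctxobj_entry0(int class, int big_endian)
{
        uint32_t word = class & 0xfff;  /* 11:0 class */

        if (big_endian)
                word |= 1 << 19;        /* 19 endian: 1 big, 0 little */
        return word;
}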
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
                      struct nouveau_gpuobj **gpuobj)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int ret;

        NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

        ret = nouveau_gpuobj_new(dev, chan,
                                 nouveau_gpuobj_class_instmem_size(dev, class),
                                 16,
                                 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
                                 gpuobj);
        if (ret) {
                NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
                return ret;
        }

        if (dev_priv->card_type >= NV_50) {
                nv_wo32(*gpuobj, 0, class);
                nv_wo32(*gpuobj, 20, 0x00010000);
        } else {
                switch (class) {
                case NV_CLASS_NULL:
                        nv_wo32(*gpuobj, 0, 0x00001030);
                        nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
                        break;
                default:
                        if (dev_priv->card_type >= NV_40) {
                                nv_wo32(*gpuobj, 0, class);
#ifdef __BIG_ENDIAN
                                nv_wo32(*gpuobj, 8, 0x01000000);
#endif
                        } else {
#ifdef __BIG_ENDIAN
                                nv_wo32(*gpuobj, 0, class | 0x00080000);
#else
                                nv_wo32(*gpuobj, 0, class);
#endif
                        }
                }
        }
        dev_priv->engine.instmem.flush(dev);

        (*gpuobj)->engine = NVOBJ_ENGINE_GR;
        (*gpuobj)->class  = class;
        return 0;
}
int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
                      struct nouveau_gpuobj **gpuobj_ret)
{
        struct drm_nouveau_private *dev_priv;
        struct nouveau_gpuobj *gpuobj;

        if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
                return -EINVAL;
        dev_priv = chan->dev->dev_private;

        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
                return -ENOMEM;
        gpuobj->dev = chan->dev;
        gpuobj->engine = NVOBJ_ENGINE_SW;
        gpuobj->class = class;
        kref_init(&gpuobj->refcount);
        gpuobj->cinst = 0x40;

        spin_lock(&dev_priv->ramin_lock);
        list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
        spin_unlock(&dev_priv->ramin_lock);
        *gpuobj_ret = gpuobj;
        return 0;
}
static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t size;
        uint32_t base;
        int ret;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        /* Base amount for object storage (4KiB enough?) */
        size = 0x2000;
        base = 0;

        /* PGRAPH context */
        size += dev_priv->engine.graph.grctx_size;

        if (dev_priv->card_type == NV_50) {
                /* Various fixed table thingos */
                size += 0x1400; /* mostly unknown stuff */
                size += 0x4000; /* vm pd */
                base  = 0x6000;
                /* RAMHT, not sure about setting size yet, 32KiB to be safe */
                size += 0x8000;
                /* RAMFC */
                size += 0x1000;
        }

        ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
        if (ret) {
                NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
                return ret;
        }

        ret = drm_mm_init(&chan->ramin_heap, base, size);
        if (ret) {
                NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
                nouveau_gpuobj_ref(NULL, &chan->ramin);
                return ret;
        }

        return 0;
}
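
/*
 * Worked example (hypothetical numbers, for illustration): on an NV50 with
 * a 64KiB PGRAPH context, the allocation above comes to 0x2000 (object
 * storage) + 0x10000 (grctx) + 0x1400 + 0x4000 (fixed tables and page
 * directory) + 0x8000 (RAMHT) + 0x1000 (RAMFC) = 0x20400 bytes, requested
 * with 0x1000 (4KiB) alignment, and the heap handed to drm_mm_init()
 * starts at base 0x6000.
 */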
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
                            uint32_t vram_h, uint32_t tt_h)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
        struct nouveau_gpuobj *vram = NULL, *tt = NULL;
        int ret, i;

        NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

        /* Allocate a chunk of memory for per-channel object storage */
        ret = nouveau_gpuobj_channel_init_pramin(chan);
        if (ret) {
                NV_ERROR(dev, "init pramin\n");
                return ret;
        }

        /* NV50 VM
         *  - Allocate per-channel page-directory
         *  - Map GART and VRAM into the channel's address space at the
         *    locations determined during init.
         */
        if (dev_priv->card_type >= NV_50) {
                u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
                u64 vm_vinst = chan->ramin->vinst + pgd_offs;
                u32 vm_pinst = chan->ramin->pinst;
                u32 pde;

                if (vm_pinst != ~0)
                        vm_pinst += pgd_offs;

                ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
                                              0, &chan->vm_pd);
                if (ret)
                        return ret;
                for (i = 0; i < 0x4000; i += 8) {
                        nv_wo32(chan->vm_pd, i + 0, 0x00000000);
                        nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
                }

                nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
                                   &chan->vm_gart_pt);
                pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
                nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
                nv_wo32(chan->vm_pd, pde + 4, 0x00000000);

                pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
                for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
                        nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
                                           &chan->vm_vram_pt[i]);

                        nv_wo32(chan->vm_pd, pde + 0,
                                chan->vm_vram_pt[i]->vinst | 0x61);
                        nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
                        pde += 8;
                }

                instmem->flush(dev);
        }
        /* RAMHT */
        if (dev_priv->card_type < NV_50) {
                nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
        } else {
                struct nouveau_gpuobj *ramht = NULL;

                ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
                                         NVOBJ_FLAG_ZERO_ALLOC, &ramht);
                if (ret)
                        return ret;

                ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
                nouveau_gpuobj_ref(NULL, &ramht);
                if (ret)
                        return ret;
        }
        /* VRAM ctxdma */
        if (dev_priv->card_type >= NV_50) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, dev_priv->vm_end,
                                             NV_DMA_ACCESS_RW,
                                             NV_DMA_TARGET_AGP, &vram);
                if (ret) {
                        NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
                        return ret;
                }
        } else {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, dev_priv->fb_available_size,
                                             NV_DMA_ACCESS_RW,
                                             NV_DMA_TARGET_VIDMEM, &vram);
                if (ret) {
                        NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
                        return ret;
                }
        }

        ret = nouveau_ramht_insert(chan, vram_h, vram);
        nouveau_gpuobj_ref(NULL, &vram);
        if (ret) {
                NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
                return ret;
        }
        /* TT memory ctxdma */
        if (dev_priv->card_type >= NV_50) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, dev_priv->vm_end,
                                             NV_DMA_ACCESS_RW,
                                             NV_DMA_TARGET_AGP, &tt);
                if (ret) {
                        NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
                        return ret;
                }
        } else
        if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
                ret = nouveau_gpuobj_gart_dma_new(chan, 0,
                                                  dev_priv->gart_info.aper_size,
                                                  NV_DMA_ACCESS_RW, &tt, NULL);
        } else {
                NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
                ret = -EINVAL;
        }

        if (ret) {
                NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
                return ret;
        }

        ret = nouveau_ramht_insert(chan, tt_h, tt);
        nouveau_gpuobj_ref(NULL, &tt);
        if (ret) {
                NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
                return ret;
        }

        return 0;
}
void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct drm_device *dev = chan->dev;
        int i;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        nouveau_ramht_ref(NULL, &chan->ramht, chan);

        nouveau_gpuobj_ref(NULL, &chan->vm_pd);
        nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
        for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
                nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);

        if (chan->ramin_heap.free_stack.next)
                drm_mm_takedown(&chan->ramin_heap);
        nouveau_gpuobj_ref(NULL, &chan->ramin);
}
int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj;
        int i;

        if (dev_priv->card_type < NV_50) {
                dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
                if (!dev_priv->susres.ramin_copy)
                        return -ENOMEM;

                for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
                        dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
                return 0;
        }

        list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
                if (!gpuobj->im_backing)
                        continue;

                gpuobj->im_backing_suspend = vmalloc(gpuobj->size);
                if (!gpuobj->im_backing_suspend) {
                        nouveau_gpuobj_resume(dev);
                        return -ENOMEM;
                }

                for (i = 0; i < gpuobj->size; i += 4)
                        gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
        }

        return 0;
}
static void
nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj;

        if (dev_priv->card_type < NV_50) {
                vfree(dev_priv->susres.ramin_copy);
                dev_priv->susres.ramin_copy = NULL;
                return;
        }

        list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
                if (!gpuobj->im_backing_suspend)
                        continue;

                vfree(gpuobj->im_backing_suspend);
                gpuobj->im_backing_suspend = NULL;
        }
}
void
nouveau_gpuobj_resume(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj;
        int i;

        if (dev_priv->card_type < NV_50) {
                for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
                        nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
                nouveau_gpuobj_suspend_cleanup(dev);
                return;
        }

        list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
                if (!gpuobj->im_backing_suspend)
                        continue;

                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
                dev_priv->engine.instmem.flush(dev);
        }

        nouveau_gpuobj_suspend_cleanup(dev);
}
int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_grobj_alloc *init = data;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
        struct nouveau_pgraph_object_class *grc;
        struct nouveau_gpuobj *gr = NULL;
        struct nouveau_channel *chan;
        int ret;

        NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);

        if (init->handle == ~0)
                return -EINVAL;

        grc = pgraph->grclass;
        while (grc->id) {
                if (grc->id == init->class)
                        break;
                grc++;
        }

        if (!grc->id) {
                NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
                return -EPERM;
        }

        if (nouveau_ramht_find(chan, init->handle))
                return -EEXIST;

        if (!grc->software)
                ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
        else
                ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
        if (ret) {
                NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
                         ret, init->channel, init->handle);
                return ret;
        }

        ret = nouveau_ramht_insert(chan, init->handle, gr);
        nouveau_gpuobj_ref(NULL, &gr);
        if (ret) {
                NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
                         ret, init->channel, init->handle);
                return ret;
        }

        return 0;
}
int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_nouveau_gpuobj_free *objfree = data;
        struct nouveau_gpuobj *gpuobj;
        struct nouveau_channel *chan;

        NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);

        gpuobj = nouveau_ramht_find(chan, objfree->handle);
        if (!gpuobj)
                return -ENOENT;

        nouveau_ramht_remove(chan, objfree->handle);
        return 0;
}
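
/*
 * Note (added for clarity): the accessors below use the object's direct
 * BAR0 mapping (pinst) when one exists. Objects that only have a VRAM
 * address (pinst == ~0, or instmem not yet available) are reached through
 * a sliding 64KiB window instead: register 0x1700 selects which 64KiB
 * chunk of VRAM appears at BAR0 offset 0x700000, and ramin_base caches the
 * current selection so the window is only moved when necessary.
 */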
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
        struct drm_device *dev = gpuobj->dev;

        if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
                u64 ptr = gpuobj->vinst + offset;
                u32 base = ptr >> 16;
                u32 val;

                spin_lock(&dev_priv->ramin_lock);
                if (dev_priv->ramin_base != base) {
                        dev_priv->ramin_base = base;
                        nv_wr32(dev, 0x001700, dev_priv->ramin_base);
                }
                val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
                spin_unlock(&dev_priv->ramin_lock);
                return val;
        }

        return nv_ri32(dev, gpuobj->pinst + offset);
}
void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
        struct drm_device *dev = gpuobj->dev;

        if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
                u64 ptr = gpuobj->vinst + offset;
                u32 base = ptr >> 16;

                spin_lock(&dev_priv->ramin_lock);
                if (dev_priv->ramin_base != base) {
                        dev_priv->ramin_base = base;
                        nv_wr32(dev, 0x001700, dev_priv->ramin_base);
                }
                nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
                spin_unlock(&dev_priv->ramin_lock);
                return;
        }

        nv_wi32(dev, gpuobj->pinst + offset, val);
}