2 * Copyright (C) 2006 Ben Skeggs.
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
30 * Ben Skeggs <darktama@iinet.net.au>
35 #include "nouveau_drv.h"
36 #include "nouveau_drm.h"
37 #include "nouveau_ramht.h"
39 /* NVidia uses context objects to drive drawing operations.
41 Context objects can be selected into 8 subchannels in the FIFO,
42 and then used via DMA command buffers.
44 A context object is referenced by a user defined handle (CARD32). The HW
45 looks up graphics objects in a hash table in the instance RAM.
47 An entry in the hash table consists of 2 CARD32. The first CARD32 contains
48 the handle, the second one a bitfield, that contains the address of the
49 object in instance RAM.
51 The format of the second CARD32 seems to be:
55 15: 0 instance_addr >> 4
56 17:16 engine (here uses 1 = graphics)
57 28:24 channel id (here uses 0)
62 15: 0 instance_addr >> 4 (maybe 19-0)
63 21:20 engine (here uses 1 = graphics)
64 I'm unsure about the other bits, but using 0 seems to work.
66 The key into the hash table depends on the object handle and channel id and
/*
 * Allocate a new GPU object of @size bytes, aligned to @align, in the
 * PRAMIN aperture, returning it through @gpuobj_ret.
 *
 * Space comes from the per-channel instmem heap when @chan supplies one,
 * otherwise from the device-global heap.  The instmem engine populates
 * and binds the object; any failure releases the object by dropping its
 * reference.  NVOBJ_FLAG_ZERO_ALLOC additionally clears the new words.
 * NOTE(review): several lines (braces, error returns) are missing from
 * this view of the file — control flow documented here is partial.
 */
71 nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
72 uint32_t size, int align, uint32_t flags,
73 struct nouveau_gpuobj **gpuobj_ret)
75 struct drm_nouveau_private *dev_priv = dev->dev_private;
76 struct nouveau_engine *engine = &dev_priv->engine;
77 struct nouveau_gpuobj *gpuobj;
78 struct drm_mm *pramin = NULL;
81 NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
82 chan ? chan->id : -1, size, align, flags);
/* Reject calls with no device state, no result pointer, or a result
 * pointer that already holds an object. */
84 if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
87 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
90 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
92 gpuobj->flags = flags;
/* Track every live object on the device-wide list (see late_takedown). */
95 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
97 /* Choose between global instmem heap, and per-channel private
98 * instmem heap. On <NV50 allow requests for private instmem
99 * to be satisfied from global heap if no per-channel area
103 NV_DEBUG(dev, "channel heap\n");
104 pramin = &chan->ramin_heap;
106 NV_DEBUG(dev, "global heap\n");
107 pramin = &dev_priv->ramin_heap;
/* Let the instmem engine create backing storage; it may adjust @size. */
109 ret = engine->instmem.populate(dev, gpuobj, &size);
111 nouveau_gpuobj_ref(NULL, &gpuobj);
116 /* Allocate a chunk of the PRAMIN aperture */
117 gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0);
118 if (gpuobj->im_pramin)
119 gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align);
121 if (!gpuobj->im_pramin) {
122 nouveau_gpuobj_ref(NULL, &gpuobj);
127 ret = engine->instmem.bind(dev, gpuobj);
129 nouveau_gpuobj_ref(NULL, &gpuobj);
134 /* calculate the various different addresses for the object */
/* Channel-local object: its PRAMIN address is offset by the channel's
 * own RAMIN block. */
136 gpuobj->pinst = gpuobj->im_pramin->start +
137 chan->ramin->im_pramin->start;
138 if (dev_priv->card_type < NV_50) {
/* Pre-NV50: channel-relative address equals the physical one. */
139 gpuobj->cinst = gpuobj->pinst;
141 gpuobj->cinst = gpuobj->im_pramin->start;
142 gpuobj->vinst = gpuobj->im_pramin->start +
143 chan->ramin->im_backing_start;
/* Global (channel-less) object: cinst is meaningless, poison it. */
146 gpuobj->pinst = gpuobj->im_pramin->start;
147 gpuobj->cinst = 0xdeadbeef;
148 gpuobj->vinst = gpuobj->im_backing_start;
/* Caller asked for zeroed storage: clear word-by-word, then flush. */
151 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
154 for (i = 0; i < gpuobj->im_pramin->size; i += 4)
155 nv_wo32(gpuobj, i, 0);
156 engine->instmem.flush(dev);
160 *gpuobj_ret = gpuobj;
/*
 * Early (pre-instmem) setup: initialise the device-wide list that
 * tracks every gpuobj allocated on this device.
 */
165 nouveau_gpuobj_early_init(struct drm_device *dev)
167 struct drm_nouveau_private *dev_priv = dev->dev_private;
171 INIT_LIST_HEAD(&dev_priv->gpuobj_list);
/*
 * Create the device RAMHT (hash table used to look up objects by
 * handle).  A "fake" gpuobj is wrapped around the fixed RAMHT region
 * (ramht_offset/ramht_size) and handed to nouveau_ramht_new(), which
 * takes its own reference; the temporary local ref is then dropped.
 * NOTE(review): the >= NV_50 branch body is not visible here —
 * presumably an early return, since NV50 builds RAMHT per-channel.
 */
177 nouveau_gpuobj_init(struct drm_device *dev)
179 struct drm_nouveau_private *dev_priv = dev->dev_private;
180 struct nouveau_gpuobj *ramht = NULL;
185 if (dev_priv->card_type >= NV_50)
188 ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset, ~0,
189 dev_priv->ramht_size,
190 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
194 ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
/* nouveau_ramht_new() holds its own reference now; release ours. */
195 nouveau_gpuobj_ref(NULL, &ramht);
/* Drop the device-wide RAMHT reference taken in nouveau_gpuobj_init(). */
200 nouveau_gpuobj_takedown(struct drm_device *dev)
202 struct drm_nouveau_private *dev_priv = dev->dev_private;
206 nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
/*
 * Final teardown: any gpuobj still on the device list at this point is
 * a leak.  Log each survivor, then force its refcount to 1 and drop
 * that last reference so nouveau_gpuobj_del() reclaims it.
 */
210 nouveau_gpuobj_late_takedown(struct drm_device *dev)
212 struct drm_nouveau_private *dev_priv = dev->dev_private;
213 struct nouveau_gpuobj *gpuobj = NULL;
214 struct list_head *entry, *tmp;
/* _safe variant: nouveau_gpuobj_ref(NULL, ...) below unlinks entries. */
218 list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
219 gpuobj = list_entry(entry, struct nouveau_gpuobj, list);
221 NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
222 gpuobj, gpuobj->refcount);
/* Force destruction regardless of outstanding references. */
224 gpuobj->refcount = 1;
225 nouveau_gpuobj_ref(NULL, &gpuobj);
/*
 * Destroy a gpuobj once its last reference is gone (called from
 * nouveau_gpuobj_ref()).  Optionally zeroes the PRAMIN words
 * (NVOBJ_FLAG_ZERO_FREE), runs the object's dtor, releases instmem
 * backing (unless the object is fake), frees the PRAMIN node (kfree'd
 * for fake objects, drm_mm block otherwise) and unlinks it from the
 * device list.
 */
230 nouveau_gpuobj_del(struct nouveau_gpuobj *gpuobj)
232 struct drm_device *dev = gpuobj->dev;
233 struct drm_nouveau_private *dev_priv = dev->dev_private;
234 struct nouveau_engine *engine = &dev_priv->engine;
237 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
/* Scrub instance memory on free if the allocator was asked to. */
239 if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
240 for (i = 0; i < gpuobj->im_pramin->size; i += 4)
241 nv_wo32(gpuobj, i, 0);
242 engine->instmem.flush(dev);
246 gpuobj->dtor(dev, gpuobj);
/* Fake objects never owned real instmem backing; skip clear for them. */
248 if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE))
249 engine->instmem.clear(dev, gpuobj);
251 if (gpuobj->im_pramin) {
/* Fake objects kzalloc'd their mm node (see new_fake); real ones came
 * from the drm_mm allocator. */
252 if (gpuobj->flags & NVOBJ_FLAG_FAKE)
253 kfree(gpuobj->im_pramin);
255 drm_mm_put_block(gpuobj->im_pramin);
258 list_del(&gpuobj->list);
/*
 * Reference-count helper: retarget @ptr at @ref.  The visible portion
 * drops the old target's reference and destroys it when the count hits
 * zero; the matching refcount increment on @ref is not visible in this
 * view — presumably handled in the missing lines (TODO confirm).
 */
265 nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
270 if (*ptr && --(*ptr)->refcount == 0)
271 nouveau_gpuobj_del(*ptr);
/*
 * Wrap a pre-existing region of instance RAM in a gpuobj without
 * allocating from the PRAMIN heaps.  @p_offset / @b_offset of ~0 mean
 * "no PRAMIN node" / "no backing" respectively.  The object is tagged
 * NVOBJ_FLAG_FAKE so del() knows to kfree the hand-built mm node and
 * skip the instmem clear.
 */
277 nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
278 uint32_t b_offset, uint32_t size,
279 uint32_t flags, struct nouveau_gpuobj **pgpuobj)
281 struct drm_nouveau_private *dev_priv = dev->dev_private;
282 struct nouveau_gpuobj *gpuobj = NULL;
286 "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
287 p_offset, b_offset, size, flags);
289 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
292 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
294 gpuobj->flags = flags | NVOBJ_FLAG_FAKE;
295 gpuobj->refcount = 1;
297 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
/* Build a standalone drm_mm_node describing the fixed PRAMIN range. */
299 if (p_offset != ~0) {
300 gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node),
302 if (!gpuobj->im_pramin) {
303 nouveau_gpuobj_ref(NULL, &gpuobj);
306 gpuobj->im_pramin->start = p_offset;
307 gpuobj->im_pramin->size = size;
/* No real bo backs a fake object; -1 is a sentinel "present" marker. */
310 if (b_offset != ~0) {
311 gpuobj->im_backing = (struct nouveau_bo *)-1;
312 gpuobj->im_backing_start = b_offset;
315 gpuobj->pinst = gpuobj->im_pramin->start;
/* Fake objects are never channel-relative; poison cinst. */
316 gpuobj->cinst = 0xdeadbeef;
317 gpuobj->vinst = gpuobj->im_backing_start;
319 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
320 for (i = 0; i < gpuobj->im_pramin->size; i += 4)
321 nv_wo32(gpuobj, i, 0);
322 dev_priv->engine.instmem.flush(dev);
/*
 * Return the instance-RAM footprint of a context object of @class for
 * this card generation.  Return values for each branch are not visible
 * in this view; per the comment further below, NV40 objects are 32
 * bytes and older cards use 16 — TODO confirm against full source.
 */
332 nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
334 struct drm_nouveau_private *dev_priv = dev->dev_private;
336 /*XXX: dodgy hack for now */
337 if (dev_priv->card_type >= NV_50)
339 if (dev_priv->card_type >= NV_40)
345 DMA objects are used to reference a piece of memory in the
346 framebuffer, PCI or AGP address space. Each object is 16 bytes big
347 and looks as follows:
350 11:0 class (seems like I can always use 0 here)
351 12 page table present?
352 13 page entry linear?
353 15:14 access: 0 rw, 1 ro, 2 wo
354 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
355 31:20 dma adjust (bits 0-11 of the address)
357 dma limit (size of transfer)
359 1 0 readonly, 1 readwrite
360 31:12 dma frame address of the page (bits 12-31 of the address)
362 page table terminator, same value as the first pte, as does nvidia
363 rivatv uses 0xffffffff
365 Non linear page tables need a list of frame addresses afterwards,
366 the rivatv project has some info on this.
368 The method below creates a DMA object in instance RAM and returns a handle
369 to it that can be used to set up context objects.
/*
 * Create a DMA context object describing [@offset, @offset+@size) in
 * the @target address space (VRAM/PCI/AGP) with the given @access mode.
 * The object layout differs by generation: pre-NV50 uses the classic
 * 16-byte DMA object (class/flags, limit, two PTEs); NV50+ uses the
 * wider format with split 40-bit offset/limit fields.
 */
372 nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
373 uint64_t offset, uint64_t size, int access,
374 int target, struct nouveau_gpuobj **gpuobj)
376 struct drm_device *dev = chan->dev;
377 struct drm_nouveau_private *dev_priv = dev->dev_private;
378 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
381 NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
382 chan->id, class, offset, size);
383 NV_DEBUG(dev, "access=%d target=%d\n", access, target);
/* AGP addresses are aperture-relative; rebase to the bus address. */
386 case NV_DMA_TARGET_AGP:
387 offset += dev_priv->gart_info.aper_base;
393 ret = nouveau_gpuobj_new(dev, chan,
394 nouveau_gpuobj_class_instmem_size(dev, class),
395 16, NVOBJ_FLAG_ZERO_ALLOC |
396 NVOBJ_FLAG_ZERO_FREE, gpuobj);
398 NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
402 if (dev_priv->card_type < NV_50) {
403 uint32_t frame, adjust, pte_flags = 0;
405 if (access != NV_DMA_ACCESS_RO)
/* Split the byte offset into a page frame and a sub-page adjust
 * (see the DMA-object format comment above). */
407 adjust = offset & 0x00000fff;
408 frame = offset & ~0x00000fff;
410 nv_wo32(*gpuobj, 0, ((1<<12) | (1<<13) | (adjust << 20) |
411 (access << 14) | (target << 16) |
413 nv_wo32(*gpuobj, 4, size - 1);
/* Both PTE slots point at the same frame (linear mapping, and the
 * second doubles as the terminator). */
414 nv_wo32(*gpuobj, 8, frame | pte_flags);
415 nv_wo32(*gpuobj, 12, frame | pte_flags);
417 uint64_t limit = offset + size - 1;
418 uint32_t flags0, flags5;
420 if (target == NV_DMA_TARGET_VIDMEM) {
428 nv_wo32(*gpuobj, 0, flags0 | class);
429 nv_wo32(*gpuobj, 4, lower_32_bits(limit));
430 nv_wo32(*gpuobj, 8, lower_32_bits(offset));
/* High bytes of the 40-bit limit and offset share one word. */
431 nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
432 (upper_32_bits(offset) & 0xff));
433 nv_wo32(*gpuobj, 20, flags5);
438 (*gpuobj)->engine = NVOBJ_ENGINE_SW;
439 (*gpuobj)->class = class;
/*
 * Create (or reuse) a ctxdma covering a GART range for @chan.
 * AGP, and SGDMA on NV50+, get a fresh DMA object rebased into the
 * card's GART virtual range; pre-NV50 SGDMA instead shares the global
 * sg_ctxdma, in which case @o_ret receives the offset within it (which
 * must fit in 32 bits).
 */
444 nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
445 uint64_t offset, uint64_t size, int access,
446 struct nouveau_gpuobj **gpuobj,
449 struct drm_device *dev = chan->dev;
450 struct drm_nouveau_private *dev_priv = dev->dev_private;
453 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
454 (dev_priv->card_type >= NV_50 &&
455 dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
456 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
457 offset + dev_priv->vm_gart_base,
458 size, access, NV_DMA_TARGET_AGP,
/* Pre-NV50 SGDMA: take a reference on the shared ctxdma instead of
 * building a new object. */
463 if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
464 nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
465 if (offset & ~0xffffffffULL) {
466 NV_ERROR(dev, "obj offset exceeds 32-bits\n");
470 *o_ret = (uint32_t)offset;
471 ret = (*gpuobj != NULL) ? 0 : -EINVAL;
473 NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
480 /* Context objects in the instance RAM have the following structure.
481 * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes.
491 scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
492 18 synchronize enable
493 19 endian: 1 big, 0 little
495 23 single step enable
496 24 patch status: 0 invalid, 1 valid
497 25 context_surface 0: 1 valid
498 26 context surface 1: 1 valid
499 27 context pattern: 1 valid
500 28 context rop: 1 valid
501 29,30 context beta, beta4
505 31:16 notify instance address
507 15:0 dma 0 instance address
508 31:16 dma 1 instance address
513 No idea what the exact format is. Here's what can be deducted:
516 11:0 class (maybe uses more bits here?)
519 25 patch status valid ?
521 15:0 DMA notifier (maybe 20:0)
523 15:0 DMA 0 instance (maybe 20:0)
526 15:0 DMA 1 instance (maybe 20:0)
/*
 * Create a graphics context object of @class for @chan and write the
 * generation-specific initial words (see the context-object format
 * comment above).  Several branch conditions between the nv_wo32 writes
 * are not visible in this view.
 */
532 nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
533 struct nouveau_gpuobj **gpuobj)
535 struct drm_device *dev = chan->dev;
536 struct drm_nouveau_private *dev_priv = dev->dev_private;
539 NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
541 ret = nouveau_gpuobj_new(dev, chan,
542 nouveau_gpuobj_class_instmem_size(dev, class),
544 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
547 NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
551 if (dev_priv->card_type >= NV_50) {
552 nv_wo32(*gpuobj, 0, class);
553 nv_wo32(*gpuobj, 20, 0x00010000);
557 nv_wo32(*gpuobj, 0, 0x00001030);
558 nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
561 if (dev_priv->card_type >= NV_40) {
562 nv_wo32(*gpuobj, 0, class);
/* NV40: big-endian flag lives in word 2 on BE hosts (presumably the
 * missing condition is an #ifdef __BIG_ENDIAN — TODO confirm). */
564 nv_wo32(*gpuobj, 8, 0x01000000);
568 nv_wo32(*gpuobj, 0, class | 0x00080000);
570 nv_wo32(*gpuobj, 0, class);
575 dev_priv->engine.instmem.flush(dev);
577 (*gpuobj)->engine = NVOBJ_ENGINE_GR;
578 (*gpuobj)->class = class;
/*
 * Create a software (CPU-emulated) object: no instance RAM is
 * allocated, the struct is purely host-side state tagged
 * NVOBJ_ENGINE_SW.  cinst 0x40 is the fixed value the sw engine
 * expects for these objects.
 */
583 nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
584 struct nouveau_gpuobj **gpuobj_ret)
586 struct drm_nouveau_private *dev_priv;
587 struct nouveau_gpuobj *gpuobj;
589 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
591 dev_priv = chan->dev->dev_private;
593 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
596 gpuobj->dev = chan->dev;
597 gpuobj->engine = NVOBJ_ENGINE_SW;
598 gpuobj->class = class;
599 gpuobj->refcount = 1;
600 gpuobj->cinst = 0x40;
602 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
603 *gpuobj_ret = gpuobj;
/*
 * Allocate the channel's private instance-RAM block (chan->ramin) and
 * initialise the per-channel PRAMIN sub-heap inside it.  The size is
 * accumulated from a base amount plus generation-specific extras
 * (grctx, NV50 fixed tables, page directory); some accumulation lines
 * are not visible in this view.
 */
608 nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
610 struct drm_device *dev = chan->dev;
611 struct drm_nouveau_private *dev_priv = dev->dev_private;
616 NV_DEBUG(dev, "ch%d\n", chan->id);
618 /* Base amount for object storage (4KiB enough?) */
623 size += dev_priv->engine.graph.grctx_size;
625 if (dev_priv->card_type == NV_50) {
626 /* Various fixed table thingos */
627 size += 0x1400; /* mostly unknown stuff */
628 size += 0x4000; /* vm pd */
630 /* RAMHT, not sure about setting size yet, 32KiB to be safe */
636 ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
638 NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
/* Carve a drm_mm heap out of the new block for per-channel objects. */
642 ret = drm_mm_init(&chan->ramin_heap, base, size);
644 NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
645 nouveau_gpuobj_ref(NULL, &chan->ramin);
/*
 * Full per-channel object setup: allocate PRAMIN storage, build the
 * NV50 page directory and map GART/VRAM page tables into it, set up the
 * channel's RAMHT (shared with the device pre-NV50, private on NV50+),
 * then create VRAM and TT ctxdmas and insert them into RAMHT under the
 * caller-supplied handles @vram_h and @tt_h.
 */
653 nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
654 uint32_t vram_h, uint32_t tt_h)
656 struct drm_device *dev = chan->dev;
657 struct drm_nouveau_private *dev_priv = dev->dev_private;
658 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
659 struct nouveau_gpuobj *vram = NULL, *tt = NULL;
662 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
664 /* Allocate a chunk of memory for per-channel object storage */
665 ret = nouveau_gpuobj_channel_init_pramin(chan);
667 NV_ERROR(dev, "init pramin\n");
672 * - Allocate per-channel page-directory
673 * - Map GART and VRAM into the channel's address space at the
674 * locations determined during init.
676 if (dev_priv->card_type >= NV_50) {
677 uint32_t vm_offset, pde;
/* The PD lives at a chipset-dependent offset inside chan->ramin. */
679 vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
680 vm_offset += chan->ramin->im_pramin->start;
682 ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
/* Fill the PD with invalid entries (0 / 0xdeadcafe poison). */
686 for (i = 0; i < 0x4000; i += 8) {
687 nv_wo32(chan->vm_pd, i + 0, 0x00000000);
688 nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
691 nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
/* Each PDE covers 512MiB of the virtual space; 8 bytes per entry. */
693 pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
694 nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
695 nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
697 pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
698 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
699 nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
700 &chan->vm_vram_pt[i]);
702 nv_wo32(chan->vm_pd, pde + 0,
703 chan->vm_vram_pt[i]->vinst | 0x61);
704 nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
/* RAMHT: shared device-wide table pre-NV50, private per-channel on
 * NV50+ (allocated from the channel's own PRAMIN). */
712 if (dev_priv->card_type < NV_50) {
713 nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
715 struct nouveau_gpuobj *ramht = NULL;
717 ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
718 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
722 ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
723 nouveau_gpuobj_ref(NULL, &ramht);
/* VRAM ctxdma (NV50 goes through the channel VM, hence TARGET_AGP). */
729 if (dev_priv->card_type >= NV_50) {
730 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
733 NV_DMA_TARGET_AGP, &vram);
735 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
739 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
740 0, dev_priv->fb_available_size,
742 NV_DMA_TARGET_VIDMEM, &vram);
744 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
/* RAMHT holds its own reference after insert; drop the local one. */
749 ret = nouveau_ramht_insert(chan, vram_h, vram);
750 nouveau_gpuobj_ref(NULL, &vram);
752 NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
756 /* TT memory ctxdma */
757 if (dev_priv->card_type >= NV_50) {
758 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
761 NV_DMA_TARGET_AGP, &tt);
763 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
767 if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
768 ret = nouveau_gpuobj_gart_dma_new(chan, 0,
769 dev_priv->gart_info.aper_size,
770 NV_DMA_ACCESS_RW, &tt, NULL);
772 NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
777 NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
781 ret = nouveau_ramht_insert(chan, tt_h, tt);
782 nouveau_gpuobj_ref(NULL, &tt);
784 NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
/*
 * Undo nouveau_gpuobj_channel_init(): release the channel's RAMHT,
 * page directory and page-table references, tear down the per-channel
 * PRAMIN heap (only if it was ever initialised), and finally drop the
 * RAMIN block itself.
 */
792 nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
794 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
795 struct drm_device *dev = chan->dev;
798 NV_DEBUG(dev, "ch%d\n", chan->id);
803 nouveau_ramht_ref(NULL, &chan->ramht, chan);
805 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
806 nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
807 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
808 nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
/* free_stack.next is non-NULL only when drm_mm_init() ran on the heap. */
810 if (chan->ramin_heap.free_stack.next)
811 drm_mm_takedown(&chan->ramin_heap);
812 nouveau_gpuobj_ref(NULL, &chan->ramin);
/*
 * Snapshot instance RAM for suspend.  Pre-NV50: copy the whole reserved
 * RAMIN region into one vmalloc'd buffer.  NV50+: give each backed,
 * non-fake gpuobj its own vmalloc'd copy; on allocation failure,
 * resume() is invoked to roll back the copies made so far.
 */
816 nouveau_gpuobj_suspend(struct drm_device *dev)
818 struct drm_nouveau_private *dev_priv = dev->dev_private;
819 struct nouveau_gpuobj *gpuobj;
822 if (dev_priv->card_type < NV_50) {
823 dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
824 if (!dev_priv->susres.ramin_copy)
827 for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
828 dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
832 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
/* Fake and unbacked objects have nothing to preserve. */
833 if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE))
836 gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size);
837 if (!gpuobj->im_backing_suspend) {
/* Roll back: resume() restores and frees the partial snapshots. */
838 nouveau_gpuobj_resume(dev);
842 for (i = 0; i < gpuobj->im_pramin->size; i += 4)
843 gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
/*
 * Free the suspend snapshots taken by nouveau_gpuobj_suspend():
 * the single RAMIN copy pre-NV50, or each object's backing copy
 * on NV50+.
 */
850 nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
852 struct drm_nouveau_private *dev_priv = dev->dev_private;
853 struct nouveau_gpuobj *gpuobj;
855 if (dev_priv->card_type < NV_50) {
856 vfree(dev_priv->susres.ramin_copy);
857 dev_priv->susres.ramin_copy = NULL;
861 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
862 if (!gpuobj->im_backing_suspend)
865 vfree(gpuobj->im_backing_suspend);
866 gpuobj->im_backing_suspend = NULL;
/*
 * Restore instance RAM from the suspend snapshots and free them.
 * Pre-NV50 writes back the whole reserved RAMIN region; NV50+ replays
 * each object's saved words and flushes instmem.
 */
871 nouveau_gpuobj_resume(struct drm_device *dev)
873 struct drm_nouveau_private *dev_priv = dev->dev_private;
874 struct nouveau_gpuobj *gpuobj;
877 if (dev_priv->card_type < NV_50) {
878 for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
879 nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
880 nouveau_gpuobj_suspend_cleanup(dev);
884 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
885 if (!gpuobj->im_backing_suspend)
888 for (i = 0; i < gpuobj->im_pramin->size; i += 4)
889 nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
890 dev_priv->engine.instmem.flush(dev);
893 nouveau_gpuobj_suspend_cleanup(dev);
/*
 * DRM_IOCTL_NOUVEAU_GROBJ_ALLOC handler: validate the requested class
 * against the pgraph class list, refuse duplicate handles, create a
 * graphics (or, failing some condition not visible here, software)
 * object and insert it into the channel's RAMHT under init->handle.
 */
896 int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
897 struct drm_file *file_priv)
899 struct drm_nouveau_private *dev_priv = dev->dev_private;
900 struct drm_nouveau_grobj_alloc *init = data;
901 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
902 struct nouveau_pgraph_object_class *grc;
903 struct nouveau_gpuobj *gr = NULL;
904 struct nouveau_channel *chan;
907 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
/* ~0 is reserved as an invalid handle. */
909 if (init->handle == ~0)
/* Look the class id up in the pgraph-supported class table. */
912 grc = pgraph->grclass;
914 if (grc->id == init->class)
920 NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
/* Handle already bound in this channel's RAMHT -> reject. */
924 if (nouveau_ramht_find(chan, init->handle))
928 ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
930 ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
932 NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
933 ret, init->channel, init->handle);
937 ret = nouveau_ramht_insert(chan, init->handle, gr);
/* RAMHT owns a reference now; drop the creation reference. */
938 nouveau_gpuobj_ref(NULL, &gr);
940 NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
941 ret, init->channel, init->handle);
/*
 * DRM_IOCTL_NOUVEAU_GPUOBJ_FREE handler: look the handle up in the
 * channel's RAMHT and remove it (the removal drops the table's
 * reference to the object).
 */
948 int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
949 struct drm_file *file_priv)
951 struct drm_nouveau_gpuobj_free *objfree = data;
952 struct nouveau_gpuobj *gpuobj;
953 struct nouveau_channel *chan;
955 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
957 gpuobj = nouveau_ramht_find(chan, objfree->handle);
961 nouveau_ramht_remove(chan, objfree->handle);
/* Read a 32-bit word from the object at byte @offset, going through
 * the object's absolute PRAMIN address (pinst). */
966 nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
968 return nv_ri32(gpuobj->dev, gpuobj->pinst + offset);
/* Write a 32-bit word to the object at byte @offset via pinst.
 * Callers must flush instmem afterwards where required. */
972 nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
974 nv_wi32(gpuobj->dev, gpuobj->pinst + offset, val);