git.karo-electronics.de Git - mv-sheeva.git/blobdiff - drivers/gpu/drm/nouveau/nouveau_sgdma.c
Merge branch 'master' into csb1725
[mv-sheeva.git] / drivers / gpu / drm / nouveau / nouveau_sgdma.c
index 6b9187d7f67de4383502973ef5cdfc9259ec6f1a..d4ac970070386cea340283ba459194eb8b1de50a 100644 (file)
@@ -95,9 +95,9 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;
 
-       NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
+       NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
 
-       pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
+       pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
        nvbe->pte_start = pte;
        for (i = 0; i < nvbe->nr_pages; i++) {
                dma_addr_t dma_offset = nvbe->pages[i];
@@ -105,11 +105,13 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
                uint32_t offset_h = upper_32_bits(dma_offset);
 
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-                       if (dev_priv->card_type < NV_50)
-                               nv_wo32(dev, gpuobj, pte++, offset_l | 3);
-                       else {
-                               nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
-                               nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
+                       if (dev_priv->card_type < NV_50) {
+                               nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
+                               pte += 1;
+                       } else {
+                               nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
+                               nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
+                               pte += 2;
                        }
 
                        dma_offset += NV_CTXDMA_PAGE_SIZE;
@@ -118,8 +120,8 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
        dev_priv->engine.instmem.flush(nvbe->dev);
 
        if (dev_priv->card_type == NV_50) {
-               nv50_vm_flush(dev, 5); /* PGRAPH */
-               nv50_vm_flush(dev, 0); /* PFIFO */
+               dev_priv->engine.fifo.tlb_flush(dev);
+               dev_priv->engine.graph.tlb_flush(dev);
        }
 
        nvbe->bound = true;
@@ -145,11 +147,13 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
                dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
 
                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-                       if (dev_priv->card_type < NV_50)
-                               nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
-                       else {
-                               nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
-                               nv_wo32(dev, gpuobj, pte++, 0x00000000);
+                       if (dev_priv->card_type < NV_50) {
+                               nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
+                               pte += 1;
+                       } else {
+                               nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
+                               nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
+                               pte += 2;
                        }
 
                        dma_offset += NV_CTXDMA_PAGE_SIZE;
@@ -158,8 +162,8 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
        dev_priv->engine.instmem.flush(nvbe->dev);
 
        if (dev_priv->card_type == NV_50) {
-               nv50_vm_flush(dev, 5);
-               nv50_vm_flush(dev, 0);
+               dev_priv->engine.fifo.tlb_flush(dev);
+               dev_priv->engine.graph.tlb_flush(dev);
        }
 
        nvbe->bound = false;
@@ -220,7 +224,11 @@ nouveau_sgdma_init(struct drm_device *dev)
        int i, ret;
 
        if (dev_priv->card_type < NV_50) {
-               aper_size = (64 * 1024 * 1024);
+               if(dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
+                       aper_size = 64 * 1024 * 1024;
+               else
+                       aper_size = 512 * 1024 * 1024;
+
                obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
                obj_size += 8; /* ctxdma header */
        } else {
@@ -230,7 +238,6 @@ nouveau_sgdma_init(struct drm_device *dev)
        }
 
        ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
-                                     NVOBJ_FLAG_ALLOW_NO_REFS |
                                      NVOBJ_FLAG_ZERO_ALLOC |
                                      NVOBJ_FLAG_ZERO_FREE, &gpuobj);
        if (ret) {
@@ -239,9 +246,9 @@ nouveau_sgdma_init(struct drm_device *dev)
        }
 
        dev_priv->gart_info.sg_dummy_page =
-               alloc_page(GFP_KERNEL|__GFP_DMA32);
+               alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
        if (!dev_priv->gart_info.sg_dummy_page) {
-               nouveau_gpuobj_del(dev, &gpuobj);
+               nouveau_gpuobj_ref(NULL, &gpuobj);
                return -ENOMEM;
        }
 
@@ -250,29 +257,34 @@ nouveau_sgdma_init(struct drm_device *dev)
                pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
-               nouveau_gpuobj_del(dev, &gpuobj);
+               nouveau_gpuobj_ref(NULL, &gpuobj);
                return -EFAULT;
        }
 
        if (dev_priv->card_type < NV_50) {
+               /* special case, allocated from global instmem heap so
+                * cinst is invalid, we use it on all channels though so
+                * cinst needs to be valid, set it the same as pinst
+                */
+               gpuobj->cinst = gpuobj->pinst;
+
                /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
                 * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
                 * on those cards? */
-               nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
-                                      (1 << 12) /* PT present */ |
-                                      (0 << 13) /* PT *not* linear */ |
-                                      (NV_DMA_ACCESS_RW  << 14) |
-                                      (NV_DMA_TARGET_PCI << 16));
-               nv_wo32(dev, gpuobj, 1, aper_size - 1);
+               nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
+                                  (1 << 12) /* PT present */ |
+                                  (0 << 13) /* PT *not* linear */ |
+                                  (NV_DMA_ACCESS_RW  << 14) |
+                                  (NV_DMA_TARGET_PCI << 16));
+               nv_wo32(gpuobj, 4, aper_size - 1);
                for (i = 2; i < 2 + (aper_size >> 12); i++) {
-                       nv_wo32(dev, gpuobj, i,
-                                   dev_priv->gart_info.sg_dummy_bus | 3);
+                       nv_wo32(gpuobj, i * 4,
+                               dev_priv->gart_info.sg_dummy_bus | 3);
                }
        } else {
                for (i = 0; i < obj_size; i += 8) {
-                       nv_wo32(dev, gpuobj, (i+0)/4,
-                                   dev_priv->gart_info.sg_dummy_bus | 0x21);
-                       nv_wo32(dev, gpuobj, (i+4)/4, 0);
+                       nv_wo32(gpuobj, i + 0, 0x00000000);
+                       nv_wo32(gpuobj, i + 4, 0x00000000);
                }
        }
        dev_priv->engine.instmem.flush(dev);
@@ -298,7 +310,7 @@ nouveau_sgdma_takedown(struct drm_device *dev)
                dev_priv->gart_info.sg_dummy_bus = 0;
        }
 
-       nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
+       nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
 }
 
 int
@@ -308,9 +320,9 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        int pte;
 
-       pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
+       pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
        if (dev_priv->card_type < NV_50) {
-               *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
+               *page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
                return 0;
        }