#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

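/* The GART page tables below always use 4KiB GPU pages: every host page is
 * entered as PAGE_SIZE / NV_CTXDMA_PAGE_SIZE consecutive PTEs, which is
 * exactly one PTE on hosts whose PAGE_SIZE is also 4KiB. */
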
struct nouveau_sgdma_be {
	struct ttm_backend backend;
	struct drm_device *dev;

	dma_addr_t *pages;	/* bus addresses of the DMA-mapped pages */
	unsigned nr_pages;

	unsigned pte_start;	/* first PTE written by the last bind() */
	bool bound;
};

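/* Note that `backend` is the first member above; that is what makes the
 * (struct nouveau_sgdma_be *)be casts in the functions below valid. */
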
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;

	NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);

	if (nvbe->pages)
		return -EINVAL;

	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
	if (!nvbe->pages)
		return -ENOMEM;

	nvbe->nr_pages = 0;
	while (num_pages--) {
		nvbe->pages[nvbe->nr_pages] =
			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev,
					  nvbe->pages[nvbe->nr_pages])) {
			be->func->clear(be);
			return -EFAULT;
		}
		nvbe->nr_pages++;
	}

	return 0;
}

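/* If a mapping fails part-way through, populate() unwinds via
 * be->func->clear(), so the pages mapped so far are released before
 * returning -EFAULT and no DMA mappings leak. */
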
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev;

	if (nvbe && nvbe->pages) {
		dev = nvbe->dev;
		NV_DEBUG(dev, "\n");

		if (nvbe->bound)
			be->func->unbind(be);

		while (nvbe->nr_pages--) {
			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
		kfree(nvbe->pages);
		nvbe->pages = NULL;
		nvbe->nr_pages = 0;
	}
}

static inline unsigned
nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);

	if (dev_priv->card_type < NV_50)
		return pte + 2;

	return pte << 1;
}

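/* Worked example for nouveau_sgdma_pte() (values follow from the code
 * above): offset 0x8000 gives pte = 8.  Pre-NV50 the result is 8 + 2 = 10,
 * since the first two 32-bit words of the ctxdma object are its header;
 * on NV50 each PTE is two words, hence 8 << 1 = 16. */
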
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

	pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
	nvbe->pte_start = pte;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = nvbe->pages[i];

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			/* recompute per GPU page so the low word tracks
			 * dma_offset if PAGE_SIZE > NV_CTXDMA_PAGE_SIZE */
			uint32_t offset_l = lower_32_bits(dma_offset);
			uint32_t offset_h = upper_32_bits(dma_offset);

			if (dev_priv->card_type < NV_50) {
				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
				pte += 1;
			} else {
				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
				nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
				pte += 2;
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.flush(nvbe->dev);

	if (dev_priv->card_type == NV_50) {
		nv50_vm_flush(dev, 5); /* PGRAPH */
		nv50_vm_flush(dev, 0); /* PFIFO */
	}

	nvbe->bound = true;
	return 0;
}

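/* PTE layout, as implied by the writes in nouveau_sgdma_bind(): pre-NV50
 * PTEs are one 32-bit word with the low bits 0x3 marking the entry present
 * and read/write; NV50 PTEs are two words, the low word carrying 0x21 and
 * the high word holding bits 39:32 of the bus address. */
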
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (!nvbe->bound)
		return 0;

	pte = nvbe->pte_start;
	for (i = 0; i < nvbe->nr_pages; i++) {
		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
			if (dev_priv->card_type < NV_50) {
				nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
				pte += 1;
			} else {
				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
				nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
				pte += 2;
			}

			dma_offset += NV_CTXDMA_PAGE_SIZE;
		}
	}
	dev_priv->engine.instmem.flush(nvbe->dev);

	if (dev_priv->card_type == NV_50) {
		nv50_vm_flush(dev, 5);
		nv50_vm_flush(dev, 0);
	}

	nvbe->bound = false;
	return 0;
}

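/* Note the asymmetry in nouveau_sgdma_unbind(): pre-NV50 entries are
 * pointed back at the dummy page rather than cleared, presumably so stray
 * GPU accesses hit a harmless scratch page; NV50 entries are zeroed. */
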
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (!nvbe)
		return;

	NV_DEBUG(nvbe->dev, "\n");

	if (nvbe->pages)
		be->func->clear(be);
	kfree(nvbe);
}

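/* TTM drives the backend through this vtable: populate() DMA-maps the
 * backing pages, bind()/unbind() write or scrub the GART PTEs, clear()
 * unmaps the pages again, and destroy() frees the wrapper itself. */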
static struct ttm_backend_func nouveau_sgdma_backend = {
	.populate	= nouveau_sgdma_populate,
	.clear		= nouveau_sgdma_clear,
	.bind		= nouveau_sgdma_bind,
	.unbind		= nouveau_sgdma_unbind,
	.destroy	= nouveau_sgdma_destroy
};

struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	if (!dev_priv->gart_info.sg_ctxdma)
		return NULL;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;
	nvbe->backend.func = &nouveau_sgdma_backend;
	return &nvbe->backend;
}

int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pci_dev *pdev = dev->pdev;
	struct nouveau_gpuobj *gpuobj = NULL;
	uint32_t aper_size, obj_size;
	int i, ret;

	if (dev_priv->card_type < NV_50) {
		aper_size = (64 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
		obj_size += 8; /* ctxdma header */
	} else {
		/* 1 entire VM page table */
		aper_size = (512 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
	}

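	/* Sizing arithmetic: 64MiB / 4KiB = 16384 PTEs at 4 bytes each gives
	 * a 64KiB object (plus an 8-byte ctxdma header) pre-NV50, while
	 * 512MiB / 4KiB = 131072 PTEs at 8 bytes each gives 1MiB on NV50. */
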
	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
				 NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.sg_dummy_page =
		alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
	if (!dev_priv->gart_info.sg_dummy_page) {
		nouveau_gpuobj_ref(NULL, &gpuobj);
		return -ENOMEM;
	}

	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
	dev_priv->gart_info.sg_dummy_bus =
		pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
		/* don't leak the dummy page if the mapping failed */
		unlock_page(dev_priv->gart_info.sg_dummy_page);
		__free_page(dev_priv->gart_info.sg_dummy_page);
		dev_priv->gart_info.sg_dummy_page = NULL;
		nouveau_gpuobj_ref(NULL, &gpuobj);
		return -EFAULT;
	}

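	/* The dummy page just mapped is the scratch target for unbound GART
	 * entries: pre-NV50 the whole page table is seeded with it below,
	 * and nouveau_sgdma_unbind() points evicted entries back at it. */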
	if (dev_priv->card_type < NV_50) {
		/* special case, allocated from global instmem heap so
		 * cinst is invalid, we use it on all channels though so
		 * cinst needs to be valid, set it the same as pinst
		 */
		gpuobj->cinst = gpuobj->pinst;

		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
		 * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
		 * on those cards? */
		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				   (1 << 12) /* PT present */ |
				   (0 << 13) /* PT *not* linear */ |
				   (NV_DMA_ACCESS_RW  << 14) |
				   (NV_DMA_TARGET_PCI << 16));
		nv_wo32(gpuobj, 4, aper_size - 1);
		for (i = 2; i < 2 + (aper_size >> 12); i++) {
			nv_wo32(gpuobj, i * 4,
				dev_priv->gart_info.sg_dummy_bus | 3);
		}
	} else {
		for (i = 0; i < obj_size; i += 8) {
			nv_wo32(gpuobj, i + 0, 0x00000000);
			nv_wo32(gpuobj, i + 4, 0x00000000);
		}
	}
	dev_priv->engine.instmem.flush(dev);

	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
	dev_priv->gart_info.aper_base = 0;
	dev_priv->gart_info.aper_size = aper_size;
	dev_priv->gart_info.sg_ctxdma = gpuobj;
	return 0;
}

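/* Teardown mirrors nouveau_sgdma_init(): unmap and release the dummy page,
 * then drop the reference that keeps the ctxdma object alive. */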
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->gart_info.sg_dummy_page) {
		/* unmap with the same PAGE_SIZE length used by the
		 * pci_map_page() call in nouveau_sgdma_init() */
		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		unlock_page(dev_priv->gart_info.sg_dummy_page);
		__free_page(dev_priv->gart_info.sg_dummy_page);
		dev_priv->gart_info.sg_dummy_page = NULL;
		dev_priv->gart_info.sg_dummy_bus = 0;
	}

	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
}

int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte;

	pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
	if (dev_priv->card_type < NV_50) {
		*page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
		return 0;
	}

	NV_ERROR(dev, "Unimplemented on NV50\n");
	return -EINVAL;
}

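/* The "+ 8" byte offset in nouveau_sgdma_get_page() skips the two-word
 * ctxdma header, matching the "pte + 2" word bias that nouveau_sgdma_pte()
 * applies on pre-NV50 cards. */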