/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nv50_display.h"
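/* Register accessors for the EVO channel's USER region.  The wrapper
 * nouveau_object built in nv50_evo_channel_new() stashes the ioremap()ed
 * NV50_PDISPLAY_USER mapping in the otherwise-unused ofuncs->rd08 slot,
 * so these helpers simply dereference that mapping directly.
 */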
static u32
nv50_evo_rd32(struct nouveau_object *object, u32 addr)
{
	void __iomem *iomem = object->oclass->ofuncs->rd08;
	return ioread32_native(iomem + addr);
}

static void
nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data)
{
	void __iomem *iomem = object->oclass->ofuncs->rd08;
	iowrite32_native(data, iomem + addr);
}
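/* Tear down a channel created by nv50_evo_channel_new(): release the
 * push buffer BO, drop the USER mapping and free the channel itself.
 */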
static void
nv50_evo_channel_del(struct nouveau_channel **pevo)
{
	struct nouveau_channel *evo = *pevo;

	if (!evo)
		return;
	*pevo = NULL;

	nouveau_bo_unmap(evo->push.buffer);
	nouveau_bo_ref(NULL, &evo->push.buffer);

	if (evo->object)
		iounmap(evo->object->oclass->ofuncs->rd08);

	kfree(evo);
}
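/* Build a DMA object for an EVO channel: the six-word object descriptor is
 * written at disp->dmao in the display's private object heap, and a two-word
 * entry at disp->hash lets the channel resolve 'handle' to it.  Both cursors
 * then advance so the next object lands behind this one.
 */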
int
nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
		    u64 base, u64 size, struct nouveau_gpuobj **pobj)
{
	struct drm_device *dev = evo->fence;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_display *disp = nv50_display(dev);
	u32 dmao = disp->dmao;
	u32 hash = disp->hash;
	u32 flags5;

	if (dev_priv->chipset < 0xc0) {
		/* not supported on 0x50, specified in format mthd */
		if (dev_priv->chipset == 0x50)
			memtype = 0;
		flags5 = 0x00010000;
	} else {
		if (memtype & 0x80000000)
			flags5 = 0x00000000; /* large pages */
		else
			flags5 = 0x00020000;
	}

	nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22));
	nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1));
	nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base));
	nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 |
					  upper_32_bits(base));
	nv_wo32(disp->ramin, dmao + 0x10, 0x00000000);
	nv_wo32(disp->ramin, dmao + 0x14, flags5);

	nv_wo32(disp->ramin, hash + 0x00, handle);
	nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) |
					   evo->handle);

	disp->dmao += 0x20;
	disp->hash += 0x08;
	return 0;
}
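/* Allocate and wrap an EVO channel: a 4KiB VRAM push buffer plus a minimal
 * nouveau_object whose rd32/wr32 point at the accessors above and whose
 * rd08 slot carries the ioremap()ed USER region for this channel id.
 */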
static int
nv50_evo_channel_new(struct drm_device *dev, int chid,
		     struct nouveau_channel **pevo)
{
	struct nv50_display *disp = nv50_display(dev);
	struct nouveau_channel *evo;
	int ret;

	evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
	if (!evo)
		return -ENOMEM;
	*pevo = evo;

	evo->handle = chid;
	evo->fence = dev;
	evo->user_get = 4;
	evo->user_put = 0;

	ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
			     &evo->push.buffer);
	if (ret == 0)
		ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pevo);
		return ret;
	}

	ret = nouveau_bo_map(evo->push.buffer);
	if (ret) {
		NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pevo);
		return ret;
	}

	evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL);
#ifdef NOUVEAU_OBJECT_MAGIC
	evo->object->_magic = NOUVEAU_OBJECT_MAGIC;
#endif
	evo->object->parent = nv_object(disp->ramin)->parent;
	evo->object->engine = nv_object(disp->ramin)->engine;
	evo->object->oclass =
		kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
	evo->object->oclass->ofuncs =
		kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
	evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
	evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
	evo->object->oclass->ofuncs->rd08 =
		ioremap(pci_resource_start(dev->pdev, 0) +
			NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
	return 0;
}
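/* Bring a channel up: point the hardware at the push buffer, enable its
 * DMA engine and wait for it to report ready, then prime the software ring
 * state and emit the usual NOUVEAU_DMA_SKIPS padding entries.
 */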
static int
nv50_evo_channel_init(struct nouveau_channel *evo)
{
	struct drm_device *dev = evo->fence;
	int id = evo->handle, ret, i;
	u64 pushbuf = evo->push.buffer->bo.offset;
	u32 tmp;

	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
	if ((tmp & 0x009f0000) == 0x00020000)
		nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);

	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
	if ((tmp & 0x003f0000) == 0x00030000)
		nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);

	/* initialise fifo */
	nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
		     NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
		     NV50_PDISPLAY_EVO_DMA_CB_VALID);
	nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
	nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);

	nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
	nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
		NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id,
			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
		return -EBUSY;
	}

	/* enable error reporting on the channel */
	nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);

	evo->dma.max = (4096/4) - 2;
	evo->dma.max &= ~7;
	evo->dma.put = 0;
	evo->dma.cur = evo->dma.put;
	evo->dma.free = evo->dma.max - evo->dma.cur;

	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(evo, 0);

	return 0;
}
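/* Quiesce a channel: mask its error reporting, acknowledge any pending
 * interrupt, disable DMA and wait for the hardware to go idle.
 */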
static void
nv50_evo_channel_fini(struct nouveau_channel *evo)
{
	struct drm_device *dev = evo->fence;
	int id = evo->handle;

	nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000);
	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
	nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id));
	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
		NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id,
			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
	}
}
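/* Free everything nv50_evo_create() set up, in reverse order. */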
void
nv50_evo_destroy(struct drm_device *dev)
{
	struct nv50_display *disp = nv50_display(dev);
	int i;

	for (i = 0; i < 2; i++) {
		if (disp->crtc[i].sem.bo) {
			nouveau_bo_unmap(disp->crtc[i].sem.bo);
			nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
		}
		nv50_evo_channel_del(&disp->crtc[i].sync);
	}
	nv50_evo_channel_del(&disp->master);
	nouveau_gpuobj_ref(NULL, &disp->ramin);
}
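/* Create the display's private object heap, the master (modesetting)
 * channel and its default scanout DMA objects, plus one "display sync"
 * channel and semaphore buffer per CRTC for page flipping.
 */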
int
nv50_evo_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_display *disp = nv50_display(dev);
	struct nouveau_channel *evo;
	int ret, i, j;

	/* setup object management on it, any other evo channel will
	 * use this also as there's no per-channel support on the
	 * hardware */
	ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536,
				 NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
		return ret;
	}

	disp->hash = 0x0000;
	disp->dmao = 0x1000;

	/* create primary evo channel, the one we use for modesetting
	 * purposes */
	ret = nv50_evo_channel_new(dev, 0, &disp->master);
	if (ret)
		return ret;
	evo = disp->master;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
				  disp->ramin->addr + 0x2000, 0x1000, NULL);
	if (ret)
		goto err;

	/* create some default objects for the scanout memtypes we support */
	ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
				  0, nvfb_vram_size(dev), NULL);
	if (ret)
		goto err;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
				  0, nvfb_vram_size(dev), NULL);
	if (ret)
		goto err;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
				  (dev_priv->chipset < 0xc0 ? 0x7a : 0xfe),
				  0, nvfb_vram_size(dev), NULL);
	if (ret)
		goto err;

	ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
				  (dev_priv->chipset < 0xc0 ? 0x70 : 0xfe),
				  0, nvfb_vram_size(dev), NULL);
	if (ret)
		goto err;

	/* create "display sync" channels and other structures we need
	 * to implement page flipping
	 */
	for (i = 0; i < 2; i++) {
		struct nv50_display_crtc *dispc = &disp->crtc[i];
		u64 offset;

		ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
		if (ret)
			goto err;

		ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
				     0, 0x0000, NULL, &dispc->sem.bo);
		if (!ret) {
			ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
			if (!ret)
				ret = nouveau_bo_map(dispc->sem.bo);
			if (ret)
				nouveau_bo_ref(NULL, &dispc->sem.bo);
			else
				offset = dispc->sem.bo->bo.offset;
		}

		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
					  offset, 4096, NULL);
		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
					  0, nvfb_vram_size(dev), NULL);
		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
					  (dev_priv->chipset < 0xc0 ?
					   0x7a : 0xfe),
					  0, nvfb_vram_size(dev), NULL);
		if (ret)
			goto err;

		ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
					  (dev_priv->chipset < 0xc0 ?
					   0x70 : 0xfe),
					  0, nvfb_vram_size(dev), NULL);
		if (ret)
			goto err;

		for (j = 0; j < 4096; j += 4)
			nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
		dispc->sem.offset = 0;
	}

	return 0;

err:
	nv50_evo_destroy(dev);
	return ret;
}
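/* Start the master channel and both per-CRTC sync channels. */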
int
nv50_evo_init(struct drm_device *dev)
{
	struct nv50_display *disp = nv50_display(dev);
	int ret, i;

	ret = nv50_evo_channel_init(disp->master);
	if (ret)
		return ret;

	for (i = 0; i < 2; i++) {
		ret = nv50_evo_channel_init(disp->crtc[i].sync);
		if (ret)
			return ret;
	}

	return 0;
}
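/* Stop whichever channels were successfully created. */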
void
nv50_evo_fini(struct drm_device *dev)
{
	struct nv50_display *disp = nv50_display(dev);
	int i;

	for (i = 0; i < 2; i++) {
		if (disp->crtc[i].sync)
			nv50_evo_channel_fini(disp->crtc[i].sync);
	}

	if (disp->master)
		nv50_evo_channel_fini(disp->master);
}