2 * Copyright 2010 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
27 #include "nouveau_drv.h"
28 #include "nouveau_mm.h"
/* NOTE(review): this listing is a partial excerpt -- braces and several
 * struct members are missing from view and are left untouched below. */
30 #define NVE0_FIFO_ENGINE_NUM 32
32 static void nve0_fifo_isr(struct drm_device *);
/* Per-engine runlist state: two playlist buffers are kept so the next list
 * can be built in the buffer the hardware is not consuming; presumably a
 * cur_playlist index (used in nve0_fifo_playlist_update) lives in the
 * elided members -- TODO confirm. */
34 struct nve0_fifo_engine {
35 struct nouveau_gpuobj *playlist[2];
/* Global PFIFO state: one nve0_fifo_engine per engine slot, plus the
 * per-channel 512-byte "user" control area backing store (mem) and its
 * BAR1 mapping (bar) allocated in nve0_fifo_create. */
39 struct nve0_fifo_priv {
40 struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM];
42 struct nouveau_gpuobj *mem;
43 struct nouveau_vma bar;
/* Per-channel state: ramfc is a fake gpuobj wrapping the RAMFC region at
 * the start of the channel's instance block (see nve0_fifo_create_context);
 * an "engine" member is also referenced elsewhere but not visible here. */
48 struct nve0_fifo_chan {
49 struct nouveau_gpuobj *ramfc;
/* Rebuild and submit the runlist ("playlist") for @engine.
 *
 * Walks all channels and, for each whose 0x800004 control word (masked with
 * 0x001f0001) matches (engine << 16) | 1, writes an entry into the playlist
 * object: the channel id at offset p+0 and zero at p+4 (the advance of p is
 * in elided lines -- presumably p += 8, TODO confirm).  The list is then
 * submitted via 0x002270 (base >> 12) / 0x002274 (engine | entry count),
 * and 0x002284 + engine*4 bit 20 is polled until the update completes.
 * The two playlist buffers alternate (cur_playlist flip) so the buffer the
 * hardware may still be reading is never rewritten in place. */
54 nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
56 struct drm_nouveau_private *dev_priv = dev->dev_private;
57 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
58 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
59 struct nve0_fifo_priv *priv = pfifo->priv;
60 struct nve0_fifo_engine *peng = &priv->engine[engine];
61 struct nouveau_gpuobj *cur;
62 u32 match = (engine << 16) | 0x00000001;
/* Lazily allocate the playlist buffer (0x8000 bytes, 0x1000 aligned) the
 * first time this slot is used. */
65 cur = peng->playlist[peng->cur_playlist];
66 if (unlikely(cur == NULL)) {
67 ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 0x1000, 0, &cur);
69 NV_ERROR(dev, "PFIFO: playlist alloc failed\n");
73 peng->playlist[peng->cur_playlist] = cur;
/* Flip to the other buffer for the *next* update. */
76 peng->cur_playlist = !peng->cur_playlist;
78 for (i = 0, p = 0; i < pfifo->channels; i++) {
79 u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001;
/* The ctrl-vs-match comparison is in an elided line; only matching
 * (enabled, engine-bound) channels reach these writes. */
82 nv_wo32(cur, p + 0, i);
83 nv_wo32(cur, p + 4, 0x00000000);
/* Point PFIFO at the new list and kick the update. */
88 nv_wr32(dev, 0x002270, cur->vinst >> 12);
89 nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
90 if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
91 NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
/* Create the hardware context for @chan: allocate per-channel driver state,
 * map the channel's 512-byte user control page through BAR1, build the
 * RAMFC at the start of the channel's instance block, then bind and enable
 * the channel and rebuild the engine runlist. */
95 nve0_fifo_create_context(struct nouveau_channel *chan)
97 struct drm_device *dev = chan->dev;
98 struct drm_nouveau_private *dev_priv = dev->dev_private;
99 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
100 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
101 struct nve0_fifo_priv *priv = pfifo->priv;
102 struct nve0_fifo_chan *fifoch;
/* VRAM address of this channel's slot in the shared user-area buffer
 * (512 bytes per channel, allocated in nve0_fifo_create). */
103 u64 usermem = priv->user.mem->vinst + chan->id * 512;
/* GPU virtual address of the indirect-buffer (IB) ring inside the
 * channel's pushbuffer. */
104 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
107 chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL);
108 if (!chan->fifo_priv)
110 fifoch = chan->fifo_priv;
111 fifoch->engine = 0; /* PGRAPH */
113 /* allocate vram for control regs, map into polling area */
114 chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
115 priv->user.bar.offset + (chan->id * 512), 512);
/* RAMFC is the first 0x100 bytes of the channel's instance block; wrap it
 * in a fake gpuobj so nv_wo32 can address it. */
122 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
123 chan->ramin->vinst, 0x100,
124 NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc);
/* Initial RAMFC contents.  0x08/0x0c: user-area address; 0x48/0x4c: IB ring
 * address plus log2(ring entries) in bits 16+.  The remaining magic values
 * mirror hardware defaults taken from the binary driver -- exact meanings
 * unverified (the 0x002310/0x002350 notes below are from the original). */
128 nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(usermem));
129 nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(usermem));
130 nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
131 nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
132 nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
133 nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
134 upper_32_bits(ib_virt));
135 nv_wo32(fifoch->ramfc, 0x84, 0x20400000);
136 nv_wo32(fifoch->ramfc, 0x94, 0x30000001);
137 nv_wo32(fifoch->ramfc, 0x9c, 0x00000100);
138 nv_wo32(fifoch->ramfc, 0xac, 0x0000001f);
139 nv_wo32(fifoch->ramfc, 0xe4, 0x00000000);
140 nv_wo32(fifoch->ramfc, 0xe8, chan->id);
141 nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */
142 nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */
143 pinstmem->flush(dev);
/* Bind the channel's instance block (0x800000 + id*8, bit 31 = valid),
 * set the enable bit (0x800004 bit 10) around the runlist rebuild. */
145 nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
146 (chan->ramin->vinst >> 12));
147 nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
148 nve0_fifo_playlist_update(dev, fifoch->engine);
149 nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
/* Error path (label elided): tear down whatever was built above. */
153 pfifo->destroy_context(chan);
/* Tear down @chan's hardware context: disable the channel (0x800004 bit 11),
 * kick it off the hardware via 0x002634 and wait for the chid to echo back,
 * rebuild the runlist without it, unbind its instance block, then release
 * the RAMFC wrapper and per-channel state. */
158 nve0_fifo_destroy_context(struct nouveau_channel *chan)
160 struct nve0_fifo_chan *fifoch = chan->fifo_priv;
161 struct drm_device *dev = chan->dev;
166 nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
167 nv_wr32(dev, 0x002634, chan->id);
/* 0x0002634 == 0x2634; the extra leading zero is harmless. */
168 if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
169 NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
170 nve0_fifo_playlist_update(dev, fifoch->engine);
171 nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000);
/* Elided lines presumably iounmap chan->user -- TODO confirm. */
178 nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
179 chan->fifo_priv = NULL;
/* Load-context hook for the pfifo engine vtable.  Body not visible in this
 * excerpt; on this generation context load is handled by the runlist, so
 * presumably a trivial success return -- TODO confirm against full source. */
184 nve0_fifo_load_context(struct nouveau_channel *chan)
/* Quiesce PFIFO for suspend/teardown: for every channel whose control word
 * has the enable bit set, set the kick bit (0x800004 bit 11), write the chid
 * to 0x002634 and wait for it to read back, logging any channel that fails
 * to idle.  Return value handling is in elided lines. */
190 nve0_fifo_unload_context(struct drm_device *dev)
192 struct drm_nouveau_private *dev_priv = dev->dev_private;
193 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
196 for (i = 0; i < pfifo->channels; i++) {
/* Skip channels that are not active (bit 0 of 0x800004 clear). */
197 if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1))
200 nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800);
201 nv_wr32(dev, 0x002634, i);
202 if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
203 NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
204 i, nv_rd32(dev, 0x002634));
/* Free all PFIFO driver state: the BAR1 mapping and backing store of the
 * user control area, and both playlist buffers of every engine slot.
 * Elided lines presumably fetch priv from pfifo, unregister the IRQ
 * handler and kfree(priv) -- TODO confirm. */
213 nve0_fifo_destroy(struct drm_device *dev)
215 struct drm_nouveau_private *dev_priv = dev->dev_private;
216 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
217 struct nve0_fifo_priv *priv;
224 nouveau_vm_put(&priv->user.bar);
225 nouveau_gpuobj_ref(NULL, &priv->user.mem);
227 for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) {
228 nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
229 nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
/* Engine takedown hook: mask all PFIFO interrupts (0x002140 = 0) before
 * freeing the driver state, so the ISR cannot run against freed data. */
235 nve0_fifo_takedown(struct drm_device *dev)
237 nv_wr32(dev, 0x002140, 0x00000000);
238 nve0_fifo_destroy(dev);
/* One-time allocation of PFIFO driver state: the shared user control area
 * (512 bytes per channel, zeroed, 0x1000 aligned), a BAR1 virtual range for
 * it (12-bit page shift, RW), the mapping itself, and registration of the
 * ISR on interrupt line 8.  Error handling between steps is elided; the
 * visible nve0_fifo_destroy call is presumably the failure path. */
242 nve0_fifo_create(struct drm_device *dev)
244 struct drm_nouveau_private *dev_priv = dev->dev_private;
245 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
246 struct nve0_fifo_priv *priv;
249 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
254 ret = nouveau_gpuobj_new(dev, NULL, pfifo->channels * 512, 0x1000,
255 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
259 ret = nouveau_vm_get(dev_priv->bar1_vm, priv->user.mem->size,
260 12, NV_MEM_ACCESS_RW, &priv->user.bar);
/* Map the backing VRAM into the BAR1 range just reserved. */
264 nouveau_vm_map(&priv->user.bar, *(struct nouveau_mem **)priv->user.mem->node);
266 nouveau_irq_register(dev, 8, nve0_fifo_isr);
270 nve0_fifo_destroy(dev);
/* Bring PFIFO up (initial load and resume): create driver state if needed,
 * reset the engine, enable every available PSUBFIFO unit, program the
 * poll-area base, clear and unmask interrupts, then re-bind every channel
 * that already has a software context (the resume path). */
275 nve0_fifo_init(struct drm_device *dev)
277 struct drm_nouveau_private *dev_priv = dev->dev_private;
278 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
279 struct nouveau_channel *chan;
280 struct nve0_fifo_chan *fifoch;
281 struct nve0_fifo_priv *priv;
285 ret = nve0_fifo_create(dev);
291 /* reset PFIFO, enable all available PSUBFIFO areas */
292 nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
293 nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
294 nv_wr32(dev, 0x000204, 0xffffffff);
/* 0x000204 reads back as a bitmask of subfifos actually present. */
296 priv->spoon_nr = hweight32(nv_rd32(dev, 0x000204));
297 NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
/* Per-subfifo setup: clear a control bit, ack and enable interrupts. */
300 for (i = 0; i < priv->spoon_nr; i++) {
301 nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
302 nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
303 nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
/* Point the hardware at the BAR1-mapped user poll area. */
306 nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
/* Ack then unmask top-level PFIFO interrupts (bit 30 left masked). */
308 nv_wr32(dev, 0x002a00, 0xffffffff);
309 nv_wr32(dev, 0x002100, 0xffffffff);
310 nv_wr32(dev, 0x002140, 0xbfffffff);
312 /* restore PFIFO context table */
313 for (i = 0; i < pfifo->channels; i++) {
314 chan = dev_priv->channels.ptr[i];
315 if (!chan || !chan->fifo_priv)
317 fifoch = chan->fifo_priv;
/* Same bind/enable/runlist sequence as nve0_fifo_create_context. */
319 nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
320 (chan->ramin->vinst >> 12));
321 nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
322 nve0_fifo_playlist_update(dev, fifoch->engine);
323 nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
/* Name tables used by the fault/interrupt decoders below.  Most entries
 * are in elided lines; only the fault-reason and two subfifo-intr entries
 * survive in this excerpt. */
/* Unit id -> name for VM faults (entries elided). */
329 struct nouveau_enum nve0_fifo_fault_unit[] = {
/* Low 4 bits of the fault status word -> human-readable reason. */
333 struct nouveau_enum nve0_fifo_fault_reason[] = {
334 { 0x00, "PT_NOT_PRESENT" },
335 { 0x01, "PT_TOO_SHORT" },
336 { 0x02, "PAGE_NOT_PRESENT" },
337 { 0x03, "VM_LIMIT_EXCEEDED" },
338 { 0x04, "NO_CHANNEL" },
339 { 0x05, "PAGE_SYSTEM_ONLY" },
340 { 0x06, "PAGE_READ_ONLY" },
341 { 0x0a, "COMPRESSED_SYSRAM" },
342 { 0x0c, "INVALID_STORAGE_TYPE" },
/* HUB-side and GPC-side client ids -> names (entries elided). */
346 struct nouveau_enum nve0_fifo_fault_hubclient[] = {
350 struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
/* Bit names for the per-subfifo interrupt status register (0x040108). */
354 struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
355 { 0x00200000, "ILLEGAL_MTHD" },
356 { 0x00800000, "EMPTY_SUBC" },
/* Decode and log one VM fault for fault @unit: read the fault instance,
 * virtual address (lo/hi) and status from the 0x2800+unit*0x10 register
 * block, then print reason, unit, HUB-vs-GPC client (status bit 6 selects
 * HUB) and the faulting channel's instance address. */
361 nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
363 u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
364 u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
365 u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
366 u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
367 u32 client = (stat & 0x00001f00) >> 8;
369 NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
370 (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
371 nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
373 nouveau_enum_print(nve0_fifo_fault_unit, unit);
374 if (stat & 0x00000040) {
376 nouveau_enum_print(nve0_fifo_fault_hubclient, client);
/* else branch: GPC-side client, GPC index in bits 24..28. */
378 printk("/GPC%d/", (stat & 0x1f000000) >> 24);
379 nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
381 printk(" on channel 0x%010llx\n", (u64)inst << 12);
/* Decode, log and acknowledge one PSUBFIFO interrupt for @unit: read the
 * status, last method address/data and channel id from the unit's 0x2000-
 * stride register block, print them, then drop the offending method
 * (0x0400c0 write) and clear the status bits. */
385 nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
387 u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
388 u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
389 u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
390 u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
/* NOTE(review): subc is masked but not shifted right by 16, so the value
 * printed as "subc %d" below is the raw bits 16..18 still in place (e.g.
 * 0x10000 instead of 1).  Looks like a missing ">> 16" -- confirm against
 * the equivalent nvc0 decoder. */
391 u32 subc = (addr & 0x00070000);
392 u32 mthd = (addr & 0x00003ffc);
394 NV_INFO(dev, "PSUBFIFO %d:", unit);
395 nouveau_bitfield_print(nve0_fifo_subfifo_intr, stat);
396 NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
397 unit, chid, subc, mthd, data);
/* Ack: tell the puller to skip the method, then clear the intr bits. */
399 nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
400 nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
/* Top-level PFIFO interrupt handler.  Reads the status word (0x002100) and
 * dispatches: bit 8 unknown (ack only), bit 28 VM faults (unit mask in
 * 0x00259c, one nve0_fifo_isr_vm_fault per set bit), bit 29 subfifo
 * interrupts (unit mask in 0x0025a0), bit 30 unknown.  Anything still set
 * afterwards is logged, acked, and all PFIFO interrupts are masked off
 * (0x002140 = 0) to avoid an interrupt storm.  The per-branch stat-bit
 * clearing and the loops over the unit masks are partly elided. */
404 nve0_fifo_isr(struct drm_device *dev)
406 u32 stat = nv_rd32(dev, 0x002100);
408 if (stat & 0x00000100) {
409 NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
410 nv_wr32(dev, 0x002100, 0x00000100);
414 if (stat & 0x10000000) {
415 u32 units = nv_rd32(dev, 0x00259c);
/* Loop over set bits in units (elided); i is the faulting unit. */
420 nve0_fifo_isr_vm_fault(dev, i);
/* Write the handled mask back to ack the faults. */
424 nv_wr32(dev, 0x00259c, units);
428 if (stat & 0x20000000) {
429 u32 units = nv_rd32(dev, 0x0025a0);
434 nve0_fifo_isr_subfifo_intr(dev, i);
438 nv_wr32(dev, 0x0025a0, units);
442 if (stat & 0x40000000) {
443 NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
/* NOTE(review): nv_mask with mask 0 and value 0 is a read-modify-write
 * that changes nothing -- possibly meant to ack 0x002a00; confirm. */
444 nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
/* Fallthrough for any status bits not handled above. */
449 NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
450 nv_wr32(dev, 0x002100, stat);
451 nv_wr32(dev, 0x002140, 0);