/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_grctx.h"
#include "nouveau_dma.h"

static int nv50_graph_register(struct drm_device *);

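/* Reset PGRAPH by disabling and re-enabling it in the master enable
 * register.  Bit 21 is undocumented but appears to gate a related unit,
 * so it is toggled along with the PGRAPH bit.
 */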
static void
nv50_graph_init_reset(struct drm_device *dev)
{
	uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);

	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |  pmc_e);
}

static void
nv50_graph_init_intr(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
	nv_wr32(dev, 0x400138, 0xffffffff);
	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
}

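/* Static PGRAPH state.  The 0xc0000000 writes appear to enable error
 * reporting in the individual PGRAPH units; 0x1540 presumably holds the
 * mask of present TPs, whose register blocks are spaced 0x1000 apart on
 * pre-NVA0 chips and 0x800 apart on NVA0 and later.
 */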
static void
nv50_graph_init_regs__nv(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t units = nv_rd32(dev, 0x1540);
	int i;

	NV_DEBUG(dev, "\n");

	nv_wr32(dev, 0x400804, 0xc0000000);
	nv_wr32(dev, 0x406800, 0xc0000000);
	nv_wr32(dev, 0x400c04, 0xc0000000);
	nv_wr32(dev, 0x401800, 0xc0000000);
	nv_wr32(dev, 0x405018, 0xc0000000);
	nv_wr32(dev, 0x402000, 0xc0000000);

	for (i = 0; i < 16; i++) {
		if (units & 1 << i) {
			if (dev_priv->chipset < 0xa0) {
				nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
				nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
				nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
			} else {
				nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
				nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
				nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
			}
		}
	}

	nv_wr32(dev, 0x400108, 0xffffffff);

	nv_wr32(dev, 0x400824, 0x00004000);
	nv_wr32(dev, 0x400500, 0x00010001);
}

static void
nv50_graph_init_regs(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV04_PGRAPH_DEBUG_3,
		     (1 << 2) /* HW_CONTEXT_SWITCH_ENABLED */);
	nv_wr32(dev, 0x402ca8, 0x800);
}

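/* Generate the context-switching microcode (ctxprog) with
 * nv50_grctx_init() and upload it to PGRAPH through the CTXCTL ucode
 * index/data port.  If generation fails, acceleration is blocked rather
 * than failing the driver load outright.
 */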
static int
nv50_graph_init_ctxctl(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_grctx ctx = {};
	uint32_t *cp;
	int i;

	NV_DEBUG(dev, "\n");

	cp = kmalloc(512 * 4, GFP_KERNEL);
	if (!cp) {
		NV_ERROR(dev, "failed to allocate ctxprog\n");
		dev_priv->engine.graph.accel_blocked = true;
		return 0;
	}

	ctx.dev = dev;
	ctx.mode = NOUVEAU_GRCTX_PROG;
	ctx.data = cp;
	ctx.ctxprog_max = 512;
	if (!nv50_grctx_init(&ctx)) {
		dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;

		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
		for (i = 0; i < ctx.ctxprog_len; i++)
			nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
	} else {
		dev_priv->engine.graph.accel_blocked = true;
	}
	kfree(cp);

	nv_wr32(dev, 0x400320, 4);
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
	return 0;
}

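/* Bring the engine up: reset, static register state, ctxprog upload and
 * class registration, with interrupts enabled only once all of that has
 * succeeded.
 */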
int
nv50_graph_init(struct drm_device *dev)
{
	int ret;

	NV_DEBUG(dev, "\n");

	nv50_graph_init_reset(dev);
	nv50_graph_init_regs__nv(dev);
	nv50_graph_init_regs(dev);

	ret = nv50_graph_init_ctxctl(dev);
	if (ret)
		return ret;

	ret = nv50_graph_register(dev);
	if (ret)
		return ret;

	nv50_graph_init_intr(dev);
	return 0;
}

void
nv50_graph_takedown(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");
}

void
nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
{
	const uint32_t mask = 0x00010001;

	if (enabled)
		nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
	else
		nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
}

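/* Figure out which channel, if any, currently owns PGRAPH by matching
 * the instance address in CTXCTL_CUR against each channel's RAMIN
 * block.
 */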
struct nouveau_channel *
nv50_graph_channel(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;
	int i;

	/* Be sure we're not in the middle of a context switch or bad things
	 * will happen, such as unloading the wrong pgraph context.
	 */
	if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000))
		NV_ERROR(dev, "Ctxprog is still running\n");

	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
		return NULL;
	inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;

	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
		struct nouveau_channel *chan = dev_priv->channels.ptr[i];

		if (chan && chan->ramin && chan->ramin->vinst == inst)
			return chan;
	}

	return NULL;
}

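/* Allocate this channel's grctx buffer and describe it in the channel's
 * RAMIN header, which sits at +0x200 on the original NV50 and at +0x20
 * on later chips.  nv50_grctx_init() in VALS mode then fills the buffer
 * with its initial contents.
 */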
int
nv50_graph_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_grctx ctx = {};
	int hdr, ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0,
				 NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
	if (ret)
		return ret;

	hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
	nv_wo32(ramin, hdr + 0x00, 0x00190002);
	nv_wo32(ramin, hdr + 0x04, chan->ramin_grctx->vinst +
				   pgraph->grctx_size - 1);
	nv_wo32(ramin, hdr + 0x08, chan->ramin_grctx->vinst);
	nv_wo32(ramin, hdr + 0x0c, 0);
	nv_wo32(ramin, hdr + 0x10, 0);
	nv_wo32(ramin, hdr + 0x14, 0x00010000);

	ctx.dev = chan->dev;
	ctx.mode = NOUVEAU_GRCTX_VALS;
	ctx.data = chan->ramin_grctx;
	nv50_grctx_init(&ctx);

	nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);

	dev_priv->engine.instmem.flush(dev);
	return 0;
}

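/* Tear the context down with PGRAPH fifo access held off: unload it if
 * this channel is the current owner, then clear the RAMIN header and
 * drop the grctx object.
 */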
void
nv50_graph_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
	unsigned long flags;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (!chan->ramin)
		return;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	pgraph->fifo_access(dev, false);

	if (pgraph->channel(dev) == chan)
		pgraph->unload_context(dev);

	for (i = hdr; i < hdr + 24; i += 4)
		nv_wo32(chan->ramin, i, 0);
	dev_priv->engine.instmem.flush(dev);

	pgraph->fifo_access(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}

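/* Kick a context load by hand.  Most of these registers are otherwise
 * undocumented; the sequence points 0x400784 at the target instance and
 * appears to replay the load steps the ctxprog would normally perform.
 */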
static int
nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
{
	uint32_t fifo = nv_rd32(dev, 0x400500);

	nv_wr32(dev, 0x400500, fifo & ~1);
	nv_wr32(dev, 0x400784, inst);
	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
	nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
	nv_wr32(dev, 0x400040, 0xffffffff);
	(void)nv_rd32(dev, 0x400040);
	nv_wr32(dev, 0x400040, 0x00000000);
	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);

	if (nouveau_wait_for_idle(dev))
		nv_wr32(dev, 0x40032c, inst | (1 << 31));
	nv_wr32(dev, 0x400500, fifo);

	return 0;
}

int
nv50_graph_load_context(struct nouveau_channel *chan)
{
	uint32_t inst = chan->ramin->vinst >> 12;

	NV_DEBUG(chan->dev, "ch%d\n", chan->id);
	return nv50_graph_do_load_context(chan->dev, inst);
}

int
nv50_graph_unload_context(struct drm_device *dev)
{
	uint32_t inst;

	inst  = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
		return 0;
	inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;

	nouveau_wait_for_idle(dev);
	nv_wr32(dev, 0x400784, inst);
	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
	nouveau_wait_for_idle(dev);

	nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
	return 0;
}

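/* Presumably invoked from the interrupt path: swap out the current
 * context, load whatever CTXCTL_NEXT points at, then re-arm the context
 * switch interrupt on the way out.
 */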
void
nv50_graph_context_switch(struct drm_device *dev)
{
	uint32_t inst;

	nv50_graph_unload_context(dev);

	inst  = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
	inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
	nv50_graph_do_load_context(dev, inst);

	nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
		NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
}

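/* Software-object (nvsw) methods.  These implement the vblank semaphore
 * protocol used for synchronised buffer swaps: a DMA object and offset
 * select the semaphore, a release value is latched, and the release
 * method parks the channel on the vblank wait list for the given head.
 */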
static int
nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan,
			   u32 class, u32 mthd, u32 data)
{
	struct nouveau_gpuobj *gpuobj;

	gpuobj = nouveau_ramht_find(chan, data);
	if (!gpuobj)
		return -ENOENT;

	if (nouveau_notifier_offset(gpuobj, NULL))
		return -EINVAL;

	chan->nvsw.vblsem = gpuobj;
	chan->nvsw.vblsem_offset = ~0;
	return 0;
}

static int
nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan,
			      u32 class, u32 mthd, u32 data)
{
	if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
		return -ERANGE;

	chan->nvsw.vblsem_offset = data >> 2;
	return 0;
}

static int
nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
				   u32 class, u32 mthd, u32 data)
{
	chan->nvsw.vblsem_rval = data;
	return 0;
}

static int
nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
			       u32 class, u32 mthd, u32 data)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
		return -EINVAL;

	drm_vblank_get(dev, data);

	chan->nvsw.vblsem_head = data;
	list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);

	return 0;
}

static int
nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
			       u32 class, u32 mthd, u32 data)
{
	struct nouveau_page_flip_state s;

	if (!nouveau_finish_page_flip(chan, &s)) {
		/* XXX - Do something here */
	}

	return 0;
}

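/* Register the object classes PGRAPH will accept.  Which tesla (3D) and
 * compute classes exist depends on the chipset.
 */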
static int
nv50_graph_register(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->engine.graph.registered)
		return 0;

	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
	NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
	NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
	NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
	NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);

	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
	NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
	NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */

	/* tesla */
	if (dev_priv->chipset == 0x50)
		NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
	else
	if (dev_priv->chipset < 0xa0)
		NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
	else {
		switch (dev_priv->chipset) {
		case 0xa0:
		case 0xaa:
		case 0xac:
			NVOBJ_CLASS(dev, 0x8397, GR);
			break;
		case 0xa3:
		case 0xa5:
		case 0xa8:
			NVOBJ_CLASS(dev, 0x8597, GR);
			break;
		case 0xaf:
			NVOBJ_CLASS(dev, 0x8697, GR);
			break;
		}
	}

	/* compute */
	NVOBJ_CLASS(dev, 0x50c0, GR);
	if (dev_priv->chipset  > 0xa0 &&
	    dev_priv->chipset != 0xaa &&
	    dev_priv->chipset != 0xac)
		NVOBJ_CLASS(dev, 0x85c0, GR);

	dev_priv->engine.graph.registered = true;
	return 0;
}

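/* NV86-family chips seem prone to hanging if the TLB is flushed while
 * PGRAPH is busy, so nv86_graph_tlb_flush() holds off the fifo and
 * polls the status registers at 0x400380/0x400384/0x400388 until the
 * engine idles, with a 2 second timeout, before flushing.
 */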
void
nv50_graph_tlb_flush(struct drm_device *dev)
{
	nv50_vm_flush(dev, 0);
}

void
nv86_graph_tlb_flush(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
	bool idle, timeout = false;
	unsigned long flags;
	u64 start;
	u32 tmp;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_mask(dev, 0x400500, 0x00000001, 0x00000000);

	start = ptimer->read(dev);
	do {
		idle = true;

		for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}
	} while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000));

	if (timeout) {
		NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: "
			      "0x%08x 0x%08x 0x%08x 0x%08x\n",
			 nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380),
			 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
	}

	nv50_vm_flush(dev, 0);

	nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}