/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "gk104.h"
#include "changk104.h"

#include <core/client.h>
#include <core/enum.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <engine/sw.h>

#include <nvif/class.h>
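/*
 * The uevent hooks below toggle bit 31 of the PFIFO interrupt enable mask
 * (0x002140), the non-stall "engine" interrupt used to signal user event
 * notifications (see gk104_fifo_intr_engine()).
 */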
void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
        struct nvkm_device *device = fifo->engine.subdev.device;
        nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}
void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
        struct nvkm_device *device = fifo->engine.subdev.device;
        nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}
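/*
 * Runlists are double-buffered: entries for every channel currently on
 * runlist 'runl' are rewritten into the inactive buffer, which is then
 * submitted to hardware via 0x002270/0x002274.  Completion is signalled
 * through the runlist interrupt (see gk104_fifo_intr_runlist()).
 */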
void
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
{
        struct gk104_fifo_chan *chan;
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_memory *mem;
        int nr = 0;
        int target;

        mutex_lock(&subdev->mutex);
        mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
        fifo->runlist[runl].next = !fifo->runlist[runl].next;

        nvkm_kmap(mem);
        list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
                nvkm_wo32(mem, (nr * 8) + 0, chan->base.chid);
                nvkm_wo32(mem, (nr * 8) + 4, 0x00000000);
                nr++;
        }
        nvkm_done(mem);

        if (nvkm_memory_target(mem) == NVKM_MEM_TARGET_VRAM)
                target = 0;
        else
                target = 3;

        nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
                                    (target << 28));
        nvkm_wr32(device, 0x002274, (runl << 20) | nr);

        if (wait_event_timeout(fifo->runlist[runl].wait,
                               !(nvkm_rd32(device, 0x002284 + (runl * 0x08))
                                 & 0x00100000),
                               msecs_to_jiffies(2000)) == 0)
                nvkm_error(subdev, "runlist %d update timeout\n", runl);
        mutex_unlock(&subdev->mutex);
}
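/*
 * Track channels on the software runlist under the subdev mutex.  These
 * helpers only update the lists; callers are expected to follow up with
 * gk104_fifo_runlist_commit() for the change to reach hardware.
 */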
void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
        mutex_lock(&fifo->base.engine.subdev.mutex);
        list_del_init(&chan->head);
        mutex_unlock(&fifo->base.engine.subdev.mutex);
}
void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
        mutex_lock(&fifo->base.engine.subdev.mutex);
        list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
        mutex_unlock(&fifo->base.engine.subdev.mutex);
}
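/*
 * Deferred recovery, run from a workqueue so that engine reset can sleep.
 * The engine/runlist masks are latched under the fifo lock, the affected
 * runlists are held off (0x002630), the engines reset, and the runlists
 * recommitted and re-enabled.
 */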
static void
gk104_fifo_recover_work(struct work_struct *w)
{
        struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct nvkm_engine *engine;
        unsigned long flags;
        u32 engm, runm, todo;
        int engn, runl;

        spin_lock_irqsave(&fifo->base.lock, flags);
        runm = fifo->recover.runm;
        engm = fifo->recover.engm;
        fifo->recover.engm = 0;
        fifo->recover.runm = 0;
        spin_unlock_irqrestore(&fifo->base.lock, flags);

        nvkm_mask(device, 0x002630, runm, runm);

        for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
                if ((engine = fifo->engine[engn].engine)) {
                        nvkm_subdev_fini(&engine->subdev, false);
                        WARN_ON(nvkm_subdev_init(&engine->subdev));
                }
        }

        for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
                gk104_fifo_runlist_commit(fifo, runl);

        nvkm_wr32(device, 0x00262c, runm);
        nvkm_mask(device, 0x002630, runm, 0x00000000);
}
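/*
 * Called with the fifo lock held: disable the faulting channel, drop it from
 * its runlist, and schedule the deferred recovery work for the engine and
 * runlist involved.
 */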
static void
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
                   struct gk104_fifo_chan *chan)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 chid = chan->base.chid;
        int engn;

        nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
                   nvkm_subdev_name[engine->subdev.index], chid);
        assert_spin_locked(&fifo->base.lock);

        nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
        list_del_init(&chan->head);

        for (engn = 0; engn < fifo->engine_nr; engn++) {
                if (fifo->engine[engn].engine == engine) {
                        fifo->recover.engm |= BIT(engn);
                        break;
                }
        }

        fifo->recover.runm |= BIT(chan->runl);
        schedule_work(&fifo->recover.work);
}
static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
        { 0x01, "BIND_NOT_UNBOUND" },
        { 0x02, "SNOOP_WITHOUT_BAR1" },
        { 0x03, "UNBIND_WHILE_RUNNING" },
        { 0x05, "INVALID_RUNLIST" },
        { 0x06, "INVALID_CTX_TGT" },
        { 0x0b, "UNBIND_WHILE_PARKED" },
        {}
};
static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x00252c);
        u32 code = intr & 0x000000ff;
        const struct nvkm_enum *en =
                nvkm_enum_find(gk104_fifo_bind_reason, code);

        nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}
static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
        { 0x0a, "CTXSW_TIMEOUT" },
        {}
};
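/*
 * CTXSW_TIMEOUT recovery: walk the per-engine status registers (0x002640+)
 * to find the channel each busy engine is stuck context-switching on, and
 * trigger recovery for it.
 */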
static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct gk104_fifo_chan *chan;
        unsigned long flags;
        u32 engn;

        spin_lock_irqsave(&fifo->base.lock, flags);
        for (engn = 0; engn < fifo->engine_nr; engn++) {
                struct nvkm_engine *engine = fifo->engine[engn].engine;
                int runl = fifo->engine[engn].runl;
                u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));
                u32 busy = (stat & 0x80000000);
                u32 next = (stat & 0x0fff0000) >> 16;
                u32 chsw = (stat & 0x00008000);
                u32 save = (stat & 0x00004000);
                u32 load = (stat & 0x00002000);
                u32 prev = (stat & 0x00000fff);
                u32 chid = load ? next : prev;
                (void)save;

                if (busy && chsw) {
                        list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
                                if (chan->base.chid == chid && engine) {
                                        gk104_fifo_recover(fifo, engine, chan);
                                        break;
                                }
                        }
                }
        }
        spin_unlock_irqrestore(&fifo->base.lock, flags);
}
static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x00254c);
        u32 code = intr & 0x000000ff;
        const struct nvkm_enum *en =
                nvkm_enum_find(gk104_fifo_sched_reason, code);

        nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
        switch (code) {
        case 0x0a:
                gk104_fifo_intr_sched_ctxsw(fifo);
                break;
        }
}
static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x00256c);
        nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
        nvkm_wr32(device, 0x00256c, stat);
}
static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x00259c);
        nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}
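/*
 * Decode tables for MMU fault reporting: faulting unit, fault reason, and
 * the HUB/GPC client that issued the request.
 */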
static const struct nvkm_enum
gk104_fifo_fault_engine[] = {
        { 0x00, "GR", NULL, NVKM_ENGINE_GR },
        { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
        { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
        { 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
        { 0x07, "PBDMA0", NULL, NVKM_ENGINE_FIFO },
        { 0x08, "PBDMA1", NULL, NVKM_ENGINE_FIFO },
        { 0x09, "PBDMA2", NULL, NVKM_ENGINE_FIFO },
        { 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
        { 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
        { 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
        { 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
        { 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
        { 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
        { 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
        {}
};
static const struct nvkm_enum
gk104_fifo_fault_reason[] = {
        { 0x01, "PDE_SIZE" },
        { 0x03, "VA_LIMIT_VIOLATION" },
        { 0x04, "UNBOUND_INST_BLOCK" },
        { 0x05, "PRIV_VIOLATION" },
        { 0x06, "RO_VIOLATION" },
        { 0x07, "WO_VIOLATION" },
        { 0x08, "PITCH_MASK_VIOLATION" },
        { 0x09, "WORK_CREATION" },
        { 0x0a, "UNSUPPORTED_APERTURE" },
        { 0x0b, "COMPRESSION_FAILURE" },
        { 0x0c, "UNSUPPORTED_KIND" },
        { 0x0d, "REGION_VIOLATION" },
        { 0x0e, "BOTH_PTES_VALID" },
        { 0x0f, "INFO_TYPE_POISONED" },
        {}
};
static const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
        { 0x07, "HOST_CPU" },
        { 0x08, "HOST_CPU_NB" },
        { 0x13, "RASTERTWOD" },
        {}
};
static const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
        { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
        { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
        { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
        { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
        { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
        { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
        { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
        { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
        { 0x20, "LTP_UTLB_0" },
        { 0x21, "LTP_UTLB_1" },
        { 0x22, "LTP_UTLB_2" },
        { 0x23, "LTP_UTLB_3" },
        { 0x24, "GPC_RGG_UTLB" },
        {}
};
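/*
 * Decode and report an MMU fault from fault buffer entry 'unit', look up the
 * faulting channel by its instance address, and kick off recovery when the
 * fault can be attributed to a specific engine and channel.
 */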
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
        u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
        u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
        u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
        u32 gpc    = (stat & 0x1f000000) >> 24;
        u32 client = (stat & 0x00001f00) >> 8;
        u32 write  = (stat & 0x00000080);
        u32 hub    = (stat & 0x00000040);
        u32 reason = (stat & 0x0000000f);
        const struct nvkm_enum *er, *eu, *ec;
        struct nvkm_engine *engine = NULL;
        struct nvkm_fifo_chan *chan;
        unsigned long flags;
        char gpcid[8] = "";

        er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
        eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
        if (hub) {
                ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
        } else {
                ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
                snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
        }

        if (eu && eu->data2) {
                switch (eu->data2) {
                case NVKM_SUBDEV_BAR:
                        nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
                        break;
                case NVKM_SUBDEV_INSTMEM:
                        nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
                        break;
                case NVKM_ENGINE_IFB:
                        nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
                        break;
                default:
                        engine = nvkm_device_engine(device, eu->data2);
                        break;
                }
        }

        chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);

        nvkm_error(subdev,
                   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
                   "reason %02x [%s] on channel %d [%010llx %s]\n",
                   write ? "write" : "read", (u64)vahi << 32 | valo,
                   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
                   reason, er ? er->name : "", chan ? chan->chid : -1,
                   chan ? chan->inst->addr : 0,
                   chan ? chan->object.client->name : "unknown");

        if (engine && chan)
                gk104_fifo_recover(fifo, engine, (void *)chan);
        nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
        { 0x00000001, "MEMREQ" },
        { 0x00000002, "MEMACK_TIMEOUT" },
        { 0x00000004, "MEMACK_EXTRA" },
        { 0x00000008, "MEMDAT_TIMEOUT" },
        { 0x00000010, "MEMDAT_EXTRA" },
        { 0x00000020, "MEMFLUSH" },
        { 0x00000040, "MEMOP" },
        { 0x00000080, "LBCONNECT" },
        { 0x00000100, "LBREQ" },
        { 0x00000200, "LBACK_TIMEOUT" },
        { 0x00000400, "LBACK_EXTRA" },
        { 0x00000800, "LBDAT_TIMEOUT" },
        { 0x00001000, "LBDAT_EXTRA" },
        { 0x00002000, "GPFIFO" },
        { 0x00004000, "GPPTR" },
        { 0x00008000, "GPENTRY" },
        { 0x00010000, "GPCRC" },
        { 0x00020000, "PBPTR" },
        { 0x00040000, "PBENTRY" },
        { 0x00080000, "PBCRC" },
        { 0x00100000, "XBARCONNECT" },
        { 0x00200000, "METHOD" },
        { 0x00400000, "METHODCRC" },
        { 0x00800000, "DEVICE" },
        { 0x02000000, "SEMAPHORE" },
        { 0x04000000, "ACQUIRE" },
        { 0x08000000, "PRI" },
        { 0x20000000, "NO_CTXSW_SEG" },
        { 0x40000000, "PBSEG" },
        { 0x80000000, "SIGNATURE" },
        {}
};
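/*
 * PBDMA interrupt, first status word (0x040108).  Software-class methods
 * (the DEVICE bit) are forwarded to the SW engine; whatever remains is
 * logged and then acknowledged.
 */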
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
        u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
        u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
        u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
        u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
        u32 subc = (addr & 0x00070000) >> 16;
        u32 mthd = (addr & 0x00003ffc);
        u32 show = stat;
        struct nvkm_fifo_chan *chan;
        unsigned long flags;
        char msg[128];

        if (stat & 0x00800000) {
                if (device->sw) {
                        if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
                                show &= ~0x00800000;
                }
                nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
        }

        if (show) {
                nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
                chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
                nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
                                   "subc %d mthd %04x data %08x\n",
                           unit, show, msg, chid, chan ? chan->inst->addr : 0,
                           chan ? chan->object.client->name : "unknown",
                           subc, mthd, data);
                nvkm_fifo_chan_put(&fifo->base, flags, &chan);
        }

        nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
        { 0x00000001, "HCE_RE_ILLEGAL_OP" },
        { 0x00000002, "HCE_RE_ALIGNB" },
        { 0x00000004, "HCE_PRIV" },
        { 0x00000008, "HCE_ILLEGAL_MTHD" },
        { 0x00000010, "HCE_ILLEGAL_CLASS" },
        {}
};
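/* PBDMA interrupt, second (HCE) status word (0x040148): log and acknowledge. */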
static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
        u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
        u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
        char msg[128];

        if (stat) {
                nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
                nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
                           unit, stat, msg, chid,
                           nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
                           nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
        }

        nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}
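/* Runlist update completion: wake waiters in gk104_fifo_runlist_commit(). */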
static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        u32 mask = nvkm_rd32(device, 0x002a00);
        while (mask) {
                int runl = __ffs(mask);
                wake_up(&fifo->runlist[runl].wait);
                nvkm_wr32(device, 0x002a00, 1 << runl);
                mask &= ~(1 << runl);
        }
}

static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
        nvkm_fifo_uevent(&fifo->base);
}
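/*
 * Top-level PFIFO interrupt handler: dispatch each pending bit of 0x002100
 * to its handler and acknowledge it, masking off anything unrecognised so a
 * stuck source cannot storm.
 */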
static void
gk104_fifo_intr(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mask = nvkm_rd32(device, 0x002140);
        u32 stat = nvkm_rd32(device, 0x002100) & mask;

        if (stat & 0x00000001) {
                gk104_fifo_intr_bind(fifo);
                nvkm_wr32(device, 0x002100, 0x00000001);
                stat &= ~0x00000001;
        }

        if (stat & 0x00000010) {
                nvkm_error(subdev, "PIO_ERROR\n");
                nvkm_wr32(device, 0x002100, 0x00000010);
                stat &= ~0x00000010;
        }

        if (stat & 0x00000100) {
                gk104_fifo_intr_sched(fifo);
                nvkm_wr32(device, 0x002100, 0x00000100);
                stat &= ~0x00000100;
        }

        if (stat & 0x00010000) {
                gk104_fifo_intr_chsw(fifo);
                nvkm_wr32(device, 0x002100, 0x00010000);
                stat &= ~0x00010000;
        }

        if (stat & 0x00800000) {
                nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
                nvkm_wr32(device, 0x002100, 0x00800000);
                stat &= ~0x00800000;
        }

        if (stat & 0x01000000) {
                nvkm_error(subdev, "LB_ERROR\n");
                nvkm_wr32(device, 0x002100, 0x01000000);
                stat &= ~0x01000000;
        }

        if (stat & 0x08000000) {
                gk104_fifo_intr_dropped_fault(fifo);
                nvkm_wr32(device, 0x002100, 0x08000000);
                stat &= ~0x08000000;
        }

        if (stat & 0x10000000) {
                u32 mask = nvkm_rd32(device, 0x00259c);
                while (mask) {
                        u32 unit = __ffs(mask);
                        gk104_fifo_intr_fault(fifo, unit);
                        nvkm_wr32(device, 0x00259c, (1 << unit));
                        mask &= ~(1 << unit);
                }
                stat &= ~0x10000000;
        }

        if (stat & 0x20000000) {
                u32 mask = nvkm_rd32(device, 0x0025a0);
                while (mask) {
                        u32 unit = __ffs(mask);
                        gk104_fifo_intr_pbdma_0(fifo, unit);
                        gk104_fifo_intr_pbdma_1(fifo, unit);
                        nvkm_wr32(device, 0x0025a0, (1 << unit));
                        mask &= ~(1 << unit);
                }
                stat &= ~0x20000000;
        }

        if (stat & 0x40000000) {
                gk104_fifo_intr_runlist(fifo);
                stat &= ~0x40000000;
        }

        if (stat & 0x80000000) {
                nvkm_wr32(device, 0x002100, 0x80000000);
                gk104_fifo_intr_engine(fifo);
                stat &= ~0x80000000;
        }

        if (stat) {
                nvkm_error(subdev, "INTR %08x\n", stat);
                nvkm_mask(device, 0x002140, stat, 0x00000000);
                nvkm_wr32(device, 0x002100, stat);
        }
}
static void
gk104_fifo_fini(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        flush_work(&fifo->recover.work);
        /* allow mmu fault interrupts, even when we're not using fifo */
        nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}
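/*
 * One-time setup: count the PBDMA units, parse the device topology tables to
 * discover the engine/runlist layout, then allocate the runlist buffers and
 * the per-channel user area mapped through BAR.
 */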
static int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        int ret, i, j;
        u32 *map;

        /* Determine number of PBDMAs by checking valid enable bits. */
        nvkm_wr32(device, 0x000204, 0xffffffff);
        fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x000204));
        nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);

        /* Read PBDMA->runlist(s) mapping from HW. */
        if (!(map = kzalloc(sizeof(*map) * fifo->pbdma_nr, GFP_KERNEL)))
                return -ENOMEM;

        for (i = 0; i < fifo->pbdma_nr; i++)
                map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));

        /* Read device topology from HW. */
        for (i = 0; i < 64; i++) {
                int type = -1, pbid = -1, engidx = -1;
                int engn = -1, runl = -1, intr = -1, mcen = -1;
                int fault = -1;
                u32 data, addr = 0;

                do {
                        data = nvkm_rd32(device, 0x022700 + (i * 0x04));
                        nvkm_trace(subdev, "%02x: %08x\n", i, data);
                        switch (data & 0x00000003) {
                        case 0x00000000: /* NOT_VALID */
                                continue;
                        case 0x00000001: /* DATA */
                                addr  = (data & 0x00fff000);
                                fault = (data & 0x000000f8) >> 3;
                                break;
                        case 0x00000002: /* ENUM */
                                if (data & 0x00000020)
                                        engn = (data & 0x3c000000) >> 26;
                                if (data & 0x00000010)
                                        runl = (data & 0x01e00000) >> 21;
                                if (data & 0x00000008)
                                        intr = (data & 0x000f8000) >> 15;
                                if (data & 0x00000004)
                                        mcen = (data & 0x00003e00) >> 9;
                                break;
                        case 0x00000003: /* ENGINE_TYPE */
                                type = (data & 0x7ffffffc) >> 2;
                                break;
                        }
                } while ((data & 0x80000000) && ++i < 64);

                /* Determine which PBDMA handles requests for this engine. */
                for (j = 0; runl >= 0 && j < fifo->pbdma_nr; j++) {
                        if (map[j] & (1 << runl)) {
                                pbid = j;
                                break;
                        }
                }

                /* Translate engine type to NVKM engine identifier. */
                switch (type) {
                case 0x00000000: engidx = NVKM_ENGINE_GR; break;
                case 0x00000001: engidx = NVKM_ENGINE_CE0; break;
                case 0x00000002: engidx = NVKM_ENGINE_CE1; break;
                case 0x00000003: engidx = NVKM_ENGINE_CE2; break;
                case 0x00000008: engidx = NVKM_ENGINE_MSPDEC; break;
                case 0x00000009: engidx = NVKM_ENGINE_MSPPP; break;
                case 0x0000000a: engidx = NVKM_ENGINE_MSVLD; break;
                case 0x0000000b: engidx = NVKM_ENGINE_MSENC; break;
                case 0x0000000c: engidx = NVKM_ENGINE_VIC; break;
                case 0x0000000d: engidx = NVKM_ENGINE_SEC; break;
                case 0x0000000e: engidx = NVKM_ENGINE_NVENC0; break;
                case 0x0000000f: engidx = NVKM_ENGINE_NVENC1; break;
                case 0x00000010: engidx = NVKM_ENGINE_NVDEC; break;
                default:
                        break;
                }

                nvkm_debug(subdev, "%02x (%8s): engine %2d runlist %2d "
                                   "pbdma %2d intr %2d reset %2d "
                                   "fault %2d addr %06x\n", type,
                           engidx < 0 ? NULL : nvkm_subdev_name[engidx],
                           engn, runl, pbid, intr, mcen, fault, addr);

                /* Mark the engine as supported if everything checks out. */
                if (engn >= 0 && runl >= 0) {
                        fifo->engine[engn].engine = engidx < 0 ? NULL :
                                nvkm_device_engine(device, engidx);
                        fifo->engine[engn].runl = runl;
                        fifo->engine[engn].pbid = pbid;
                        fifo->engine_nr = max(fifo->engine_nr, engn + 1);
                        fifo->runlist[runl].engm |= 1 << engn;
                        fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
                }
        }

        kfree(map);

        for (i = 0; i < fifo->runlist_nr; i++) {
                ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                                      0x8000, 0x1000, false,
                                      &fifo->runlist[i].mem[0]);
                if (ret)
                        return ret;

                ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                                      0x8000, 0x1000, false,
                                      &fifo->runlist[i].mem[1]);
                if (ret)
                        return ret;

                init_waitqueue_head(&fifo->runlist[i].wait);
                INIT_LIST_HEAD(&fifo->runlist[i].chan);
        }

        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                              fifo->base.nr * 0x200, 0x1000, true,
                              &fifo->user.mem);
        if (ret)
                return ret;

        ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
                            &fifo->user.bar);
        if (ret)
                return ret;

        nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
        return 0;
}
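/*
 * Runtime initialisation: enable the discovered PBDMA units, clear and
 * unmask their interrupts, point the hardware at the user area, then enable
 * top-level PFIFO interrupts.
 */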
static void
gk104_fifo_init(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        int i;

        /* Enable PBDMAs. */
        nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);

        /* PBDMA[n] */
        for (i = 0; i < fifo->pbdma_nr; i++) {
                nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
                nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
                nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
        }

        /* PBDMA[n].HCE */
        for (i = 0; i < fifo->pbdma_nr; i++) {
                nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
                nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
        }

        nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

        nvkm_wr32(device, 0x002100, 0xffffffff);
        nvkm_wr32(device, 0x002140, 0x7fffffff);
}
static void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
        struct gk104_fifo *fifo = gk104_fifo(base);
        int i;

        nvkm_vm_put(&fifo->user.bar);
        nvkm_memory_del(&fifo->user.mem);
        for (i = 0; i < fifo->runlist_nr; i++) {
                nvkm_memory_del(&fifo->runlist[i].mem[1]);
                nvkm_memory_del(&fifo->runlist[i].mem[0]);
        }
        return fifo;
}
int
gk104_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
                int index, int nr, struct nvkm_fifo **pfifo)
{
        struct gk104_fifo *fifo;

        if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
                return -ENOMEM;
        INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
        *pfifo = &fifo->base;

        return nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
}
static const struct nvkm_fifo_func
gk104_fifo = {
        .dtor = gk104_fifo_dtor,
        .oneinit = gk104_fifo_oneinit,
        .init = gk104_fifo_init,
        .fini = gk104_fifo_fini,
        .intr = gk104_fifo_intr,
        .uevent_init = gk104_fifo_uevent_init,
        .uevent_fini = gk104_fifo_uevent_fini,
        .chan = {
                &gk104_fifo_gpfifo_oclass,
                NULL
        },
};

int
gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
        return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
}