/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
28 #include <core/client.h>
29 #include <core/enum.h>
30 #include <core/gpuobj.h>
31 #include <subdev/bios.h>
32 #include <subdev/bios/disp.h>
33 #include <subdev/bios/init.h>
34 #include <subdev/bios/pll.h>
35 #include <subdev/devinit.h>
36 #include <subdev/timer.h>
38 static const struct nvkm_disp_oclass *
39 nv50_disp_root_(struct nvkm_disp *base)
41 return nv50_disp(base)->func->root;
45 nv50_disp_outp_internal_crt_(struct nvkm_disp *base, int index,
46 struct dcb_output *dcb, struct nvkm_output **poutp)
48 struct nv50_disp *disp = nv50_disp(base);
49 return disp->func->outp.internal.crt(base, index, dcb, poutp);
53 nv50_disp_outp_internal_tmds_(struct nvkm_disp *base, int index,
54 struct dcb_output *dcb,
55 struct nvkm_output **poutp)
57 struct nv50_disp *disp = nv50_disp(base);
58 return disp->func->outp.internal.tmds(base, index, dcb, poutp);
62 nv50_disp_outp_internal_lvds_(struct nvkm_disp *base, int index,
63 struct dcb_output *dcb,
64 struct nvkm_output **poutp)
66 struct nv50_disp *disp = nv50_disp(base);
67 return disp->func->outp.internal.lvds(base, index, dcb, poutp);
71 nv50_disp_outp_internal_dp_(struct nvkm_disp *base, int index,
72 struct dcb_output *dcb, struct nvkm_output **poutp)
74 struct nv50_disp *disp = nv50_disp(base);
75 if (disp->func->outp.internal.dp)
76 return disp->func->outp.internal.dp(base, index, dcb, poutp);
81 nv50_disp_outp_external_tmds_(struct nvkm_disp *base, int index,
82 struct dcb_output *dcb,
83 struct nvkm_output **poutp)
85 struct nv50_disp *disp = nv50_disp(base);
86 if (disp->func->outp.external.tmds)
87 return disp->func->outp.external.tmds(base, index, dcb, poutp);
92 nv50_disp_outp_external_dp_(struct nvkm_disp *base, int index,
93 struct dcb_output *dcb, struct nvkm_output **poutp)
95 struct nv50_disp *disp = nv50_disp(base);
96 if (disp->func->outp.external.dp)
97 return disp->func->outp.external.dp(base, index, dcb, poutp);
102 nv50_disp_intr_(struct nvkm_disp *base)
104 struct nv50_disp *disp = nv50_disp(base);
105 disp->func->intr(disp);
109 nv50_disp_dtor_(struct nvkm_disp *base)
111 struct nv50_disp *disp = nv50_disp(base);
112 nvkm_event_fini(&disp->uevent);
116 static const struct nvkm_disp_func
118 .dtor = nv50_disp_dtor_,
119 .intr = nv50_disp_intr_,
120 .root = nv50_disp_root_,
121 .outp.internal.crt = nv50_disp_outp_internal_crt_,
122 .outp.internal.tmds = nv50_disp_outp_internal_tmds_,
123 .outp.internal.lvds = nv50_disp_outp_internal_lvds_,
124 .outp.internal.dp = nv50_disp_outp_internal_dp_,
125 .outp.external.tmds = nv50_disp_outp_external_tmds_,
126 .outp.external.dp = nv50_disp_outp_external_dp_,
130 nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
131 int index, int heads, struct nvkm_disp **pdisp)
133 struct nv50_disp *disp;
136 if (!(disp = kzalloc(sizeof(*disp), GFP_KERNEL)))
138 INIT_WORK(&disp->supervisor, func->super);
140 *pdisp = &disp->base;
142 ret = nvkm_disp_ctor(&nv50_disp_, device, index, &disp->base);
146 for (i = 0; func->head.new && i < heads; i++) {
147 ret = func->head.new(&disp->base, i);
152 return nvkm_event_init(func->uevent, 1, 1 + (heads * 4), &disp->uevent);
/* exec_lookup(): translate a (head, output-resource index, mc control word)
 * triple read from the supervisor state into the nvkm_output driving it,
 * and fetch the matching VBIOS output table entry via nvbios_outp_match().
 *
 * NOTE(review): extraction artifact — each line below carries a stray
 * leading line number and several original lines are missing (function
 * braces, the DAC/SOR/PIOR branch headers, the final return paths).
 * Restore the full body from the upstream nouveau driver before building.
 */
155 static struct nvkm_output *
156 exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
157 u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
158 struct nvbios_outp *info)
160 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
161 struct nvkm_bios *bios = subdev->device->bios;
162 struct nvkm_output *outp;
/* DAC path: analog output, no sub-type decode needed. */
166 type = DCB_OUTPUT_ANALOG;
/* SOR path: decode the protocol from bits 11:8 of the mc control word;
 * `mask` records which sublink(s) of the SOR are in use. */
170 switch (ctrl & 0x00000f00) {
171 case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
172 case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
173 case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
174 case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
175 case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
176 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
178 nvkm_error(subdev, "unknown SOR mc %08x\n", ctrl);
/* PIOR path: external encoder; actual type was cached at probe time. */
186 switch (ctrl & 0x00000f00) {
187 case 0x00000000: type |= disp->pior.type[or]; break;
189 nvkm_error(subdev, "unknown PIOR mc %08x\n", ctrl);
/* Build a DCB hash mask: link bits (7:6), or bit, head bit. */
194 mask = 0x00c0 & (mask << 6);
195 mask |= 0x0001 << or;
196 mask |= 0x0100 << head;
/* Scan registered outputs for one whose DCB hash matches, then look up
 * its VBIOS output script table. */
198 list_for_each_entry(outp, &disp->base.outp, head) {
199 if ((outp->info.hasht & 0xff) == type &&
200 (outp->info.hashm & mask) == mask) {
201 *data = nvbios_outp_match(bios, outp->info.hasht, mask,
202 ver, hdr, cnt, len, info);
/* exec_script(): find which output resource is currently driving `head`
 * by scanning the DAC/SOR/PIOR mc registers, then execute VBIOS output
 * script `id` for that output.  Returns the output on success.
 *
 * NOTE(review): extraction artifact — stray leading line numbers and
 * missing lines (decls of `i`/`ctrl`/`data`/`reg`, braces, the
 * nvbios_init field list and nvbios_exec() call).  Restore from the
 * upstream nouveau driver before building.
 */
212 static struct nvkm_output *
213 exec_script(struct nv50_disp *disp, int head, int id)
215 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
216 struct nvkm_device *device = subdev->device;
217 struct nvkm_bios *bios = device->bios;
218 struct nvkm_output *outp;
219 struct nvbios_outp info;
220 u8 ver, hdr, cnt, len;
/* DAC mc registers: stop at the first one claiming this head. */
226 for (i = 0; !(ctrl & (1 << head)) && i < disp->func->dac.nr; i++)
227 ctrl = nvkm_rd32(device, 0x610b5c + (i * 8));
/* SOR mc registers live at a different offset on pre-g94-style chips. */
230 if (!(ctrl & (1 << head))) {
231 if (device->chipset < 0x90 ||
232 device->chipset == 0x92 ||
233 device->chipset == 0xa0) {
238 for (i = 0; !(ctrl & (1 << head)) && i < disp->func->sor.nr; i++)
239 ctrl = nvkm_rd32(device, reg + (i * 8));
/* Fall back to PIOR (external encoder) mc registers. */
244 if (!(ctrl & (1 << head))) {
245 for (i = 0; !(ctrl & (1 << head)) && i < disp->func->pior.nr; i++)
246 ctrl = nvkm_rd32(device, 0x610b84 + (i * 8));
/* Nothing owns this head: no script to run. */
250 if (!(ctrl & (1 << head)))
/* Map the winning (or, ctrl) onto an output and its VBIOS table, then
 * run script `id` from that table. */
254 outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
256 struct nvbios_init init = {
259 .offset = info.script[id],
/* exec_clkcmp(): locate the output driving `head` (same register scan as
 * exec_script()), derive its configuration word from the mc control bits,
 * and execute the VBIOS clock-comparison script matching `pclk`.
 * `id == 0xff` only performs the lookup without running a script.
 *
 * NOTE(review): extraction artifact — stray leading line numbers plus
 * missing lines (decls, braces, TMDS >165MHz dual-link handling, the
 * location!=0 PIOR branch, nvbios_exec() call and returns).  Restore
 * from the upstream nouveau driver before building.
 */
271 static struct nvkm_output *
272 exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
274 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
275 struct nvkm_device *device = subdev->device;
276 struct nvkm_bios *bios = device->bios;
277 struct nvkm_output *outp;
278 struct nvbios_outp info1;
279 struct nvbios_ocfg info2;
280 u8 ver, hdr, cnt, len;
/* DAC clock-compare mc registers. */
286 for (i = 0; !(ctrl & (1 << head)) && i < disp->func->dac.nr; i++)
287 ctrl = nvkm_rd32(device, 0x610b58 + (i * 8));
/* SOR registers; offset differs on pre-g94-style chipsets. */
290 if (!(ctrl & (1 << head))) {
291 if (device->chipset < 0x90 ||
292 device->chipset == 0x92 ||
293 device->chipset == 0xa0) {
298 for (i = 0; !(ctrl & (1 << head)) && i < disp->func->sor.nr; i++)
299 ctrl = nvkm_rd32(device, reg + (i * 8));
/* PIOR fallback. */
304 if (!(ctrl & (1 << head))) {
305 for (i = 0; !(ctrl & (1 << head)) && i < disp->func->pior.nr; i++)
306 ctrl = nvkm_rd32(device, 0x610b80 + (i * 8));
310 if (!(ctrl & (1 << head)))
314 outp = exec_lookup(disp, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
/* Internal encoders: config word comes from mc bits 11:8, with LVDS
 * picking up extra strap/config state cached in disp->sor.lvdsconf. */
318 *conf = (ctrl & 0x00000f00) >> 8;
319 if (outp->info.location == 0) {
320 switch (outp->info.type) {
321 case DCB_OUTPUT_TMDS:
325 case DCB_OUTPUT_LVDS:
326 *conf |= disp->sor.lvdsconf;
332 *conf = (ctrl & 0x00000f00) >> 8;
/* Find the output-config entry for this conf word, then the clock-
 * comparison script whose threshold matches pclk, and run it. */
336 data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
337 &ver, &hdr, &cnt, &len, &info2);
338 if (data && id < 0xff) {
339 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
341 struct nvbios_init init = {
357 /* If programming a TMDS output on a SOR that can also be configured for
358 * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
360 * It looks like the VBIOS TMDS scripts make an attempt at this, however,
361 * the VBIOS scripts on at least one board I have only switch it off on
362 * link 0, causing a blank display if the output has previously been
363 * programmed for DisplayPort.
366 nv50_disp_intr_unk40_0_tmds(struct nv50_disp *disp,
367 struct dcb_output *outp)
369 struct nvkm_device *device = disp->base.engine.subdev.device;
370 struct nvkm_bios *bios = device->bios;
371 const int link = !(outp->sorconf.link & 1);
372 const int or = ffs(outp->or) - 1;
373 const u32 loff = (or * 0x800) + (link * 0x80);
374 const u16 mask = (outp->sorconf.link << 6) | outp->or;
375 struct dcb_output match;
378 if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match))
379 nvkm_mask(device, 0x61c10c + loff, 0x00000001, 0x00000000);
383 nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head)
385 struct nvkm_device *device = disp->base.engine.subdev.device;
386 struct nvkm_output *outp;
387 u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
390 outp = exec_clkcmp(disp, head, 1, pclk, &conf);
394 if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
395 nv50_disp_intr_unk40_0_tmds(disp, &outp->info);
396 nv50_disp_dptmds_war_3(disp, &outp->info);
/* nv50_disp_intr_unk20_2_dp(): program DisplayPort link-symbol scheduling
 * for `head`/`outp` at pixel clock `pclk`: hblank/vblank symbol budgets,
 * then a brute-force search over transfer-unit (TU) size and VTU
 * integer/fraction/adjust values for the best watermark fit.
 *
 * NOTE(review): extraction artifact — stray leading line numbers and
 * missing lines (the `value` decl, rounding steps, most of the TU loop
 * body, best_* bookkeeping, and several register writes).  Restore from
 * the upstream nouveau driver before building.
 */
400 nv50_disp_intr_unk20_2_dp(struct nv50_disp *disp, int head,
401 struct dcb_output *outp, u32 pclk)
403 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
404 struct nvkm_device *device = subdev->device;
/* sorconf.link bit 0 clear means the secondary sublink is in use. */
405 const int link = !(outp->sorconf.link & 1);
406 const int or = ffs(outp->or) - 1;
407 const u32 soff = ( or * 0x800);
408 const u32 loff = (link * 0x080) + soff;
409 const u32 ctrl = nvkm_rd32(device, 0x610794 + (or * 8));
/* fixed-point scale: one link symbol == `symbol` units. */
410 const u32 symbol = 100000;
411 const s32 vactive = nvkm_rd32(device, 0x610af8 + (head * 0x540)) & 0xffff;
412 const s32 vblanke = nvkm_rd32(device, 0x610ae8 + (head * 0x540)) & 0xffff;
413 const s32 vblanks = nvkm_rd32(device, 0x610af0 + (head * 0x540)) & 0xffff;
414 u32 dpctrl = nvkm_rd32(device, 0x61c10c + loff);
415 u32 clksor = nvkm_rd32(device, 0x614300 + soff);
416 int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
417 int TU, VTUi, VTUf, VTUa;
418 u64 link_data_rate, link_ratio, unk;
419 u32 best_diff = 64 * symbol;
420 u32 link_nr, link_bw, bits;
/* link rate (kHz) from the clock-source register; lane count from the
 * enabled-lane mask in the DP control register. */
423 link_bw = (clksor & 0x000c0000) ? 270000 : 162000;
424 link_nr = hweight32(dpctrl & 0x000f0000);
426 /* symbols/hblank - algorithm taken from comments in tegra driver */
427 value = vblanke + vactive - vblanks - 7;
428 value = value * link_bw;
430 value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
431 nvkm_mask(device, 0x61c1e8 + soff, 0x0000ffff, value);
433 /* symbols/vblank - algorithm taken from comments in tegra driver */
434 value = vblanks - vblanke - 25;
435 value = value * link_bw;
437 value = value - ((36 / link_nr) + 3) - 1;
438 nvkm_mask(device, 0x61c1ec + soff, 0x00ffffff, value);
440 /* watermark / activesym */
/* bits per pixel from the mc protocol field (10/8bpc * 3 components). */
441 if ((ctrl & 0xf0000) == 0x60000) bits = 30;
442 else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
445 link_data_rate = (pclk * bits / 8) / link_nr;
447 /* calculate ratio of packed data rate to link symbol rate */
448 link_ratio = link_data_rate * symbol;
449 do_div(link_ratio, link_bw);
451 for (TU = 64; TU >= 32; TU--) {
452 /* calculate average number of valid symbols in each TU */
453 u32 tu_valid = link_ratio * TU;
456 /* find a hw representation for the fraction.. */
457 VTUi = tu_valid / symbol;
458 calc = VTUi * symbol;
459 diff = tu_valid - calc;
461 if (diff >= (symbol / 2)) {
462 VTUf = symbol / (symbol - diff);
463 if (symbol - (VTUf * diff))
468 calc += symbol - (symbol / VTUf);
476 VTUf = min((int)(symbol / diff), 15);
477 calc += symbol / VTUf;
480 diff = calc - tu_valid;
482 /* no remainder, but the hw doesn't like the fractional
483 * part to be zero. decrement the integer part and
484 * have the fraction add a whole symbol back
/* keep the TU/VTU combination with the smallest error. */
491 if (diff < best_diff) {
503 nvkm_error(subdev, "unable to find suitable dp config\n");
507 /* XXX close to vbios numbers, but not right */
508 unk = (symbol - link_ratio) * bestTU;
514 nvkm_mask(device, 0x61c10c + loff, 0x000001fc, bestTU << 2);
515 nvkm_mask(device, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
517 bestVTUi << 8 | unk);
/* nv50_disp_intr_unk20_2(): second supervisor stage, encoder-attach part:
 * retrain DP links if needed, run the clock-compare attach script, and
 * program the head/output clock-routing registers for DAC/SOR/PIOR.
 *
 * NOTE(review): extraction artifact — stray leading line numbers and
 * missing lines (decls of `conf`/`oreg`/`oval`/`mask`/`hval`, NULL
 * checks, else branches, braces).  Restore from the upstream nouveau
 * driver before building.
 */
521 nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
523 struct nvkm_device *device = disp->base.engine.subdev.device;
524 struct nvkm_output *outp;
525 u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
526 u32 hval, hreg = 0x614200 + (head * 0x800);
/* id 0xff: lookup only, the attach script itself is run further down. */
530 outp = exec_clkcmp(disp, head, 0xff, pclk, &conf);
534 /* we allow both encoder attach and detach operations to occur
535 * within a single supervisor (ie. modeset) sequence. the
536 * encoder detach scripts quite often switch off power to the
537 * lanes, which requires the link to be re-trained.
539 * this is not generally an issue as the sink "must" (heh)
540 * signal an irq when it's lost sync so the driver can
543 * however, on some boards, if one does not configure at least
544 * the gpu side of the link *before* attaching, then various
545 * things can go horribly wrong (PDISP disappearing from mmio,
546 * third supervisor never happens, etc).
548 * the solution is simply to retrain here, if necessary. last
549 * i checked, the binary driver userspace does not appear to
550 * trigger this situation (it forces an UPDATE between steps).
552 if (outp->info.type == DCB_OUTPUT_DP) {
553 u32 soff = (ffs(outp->info.or) - 1) * 0x08;
/* internal (SOR) vs external (PIOR) DP control register banks. */
556 if (outp->info.location == 0) {
557 ctrl = nvkm_rd32(device, 0x610794 + soff);
560 ctrl = nvkm_rd32(device, 0x610b80 + soff);
/* required data rate from bits-per-component in the mc word. */
564 switch ((ctrl & 0x000f0000) >> 16) {
565 case 6: datarate = pclk * 30; break;
566 case 5: datarate = pclk * 24; break;
569 datarate = pclk * 18;
573 if (nvkm_output_dp_train(outp, datarate / soff))
574 OUTP_ERR(outp, "link not trained before attach");
/* run the real attach (clkcmp id 0) script now. */
577 exec_clkcmp(disp, head, 0, pclk, &conf);
/* choose the clock-routing register/value per output resource type. */
579 if (!outp->info.location && outp->info.type == DCB_OUTPUT_ANALOG) {
580 oreg = 0x614280 + (ffs(outp->info.or) - 1) * 0x800;
585 if (!outp->info.location) {
586 if (outp->info.type == DCB_OUTPUT_DP)
587 nv50_disp_intr_unk20_2_dp(disp, head, &outp->info, pclk);
588 oreg = 0x614300 + (ffs(outp->info.or) - 1) * 0x800;
589 oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
593 oreg = 0x614380 + (ffs(outp->info.or) - 1) * 0x800;
599 nvkm_mask(device, hreg, 0x0000000f, hval);
600 nvkm_mask(device, oreg, mask, oval);
602 nv50_disp_dptmds_war_2(disp, &outp->info);
606 nv50_disp_intr_unk20_1(struct nv50_disp *disp, int head)
608 struct nvkm_device *device = disp->base.engine.subdev.device;
609 struct nvkm_devinit *devinit = device->devinit;
610 u32 pclk = nvkm_rd32(device, 0x610ad0 + (head * 0x540)) & 0x3fffff;
612 nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head, pclk);
/* nv50_disp_intr_unk20_0(): second supervisor stage, encoder-detach part:
 * run VBIOS script 2 for the head, and for DP outputs additionally run
 * the full power-down script and reset link-training state.
 *
 * NOTE(review): extraction artifact — stray leading line numbers and
 * missing lines (remaining nvbios_init fields, nvbios_exec() call,
 * braces).  Restore from the upstream nouveau driver before building.
 */
616 nv50_disp_intr_unk20_0(struct nv50_disp *disp, int head)
618 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
619 struct nvkm_output *outp = exec_script(disp, head, 2);
621 /* the binary driver does this outside of the supervisor handling
622 * (after the third supervisor from a detach). we (currently?)
623 * allow both detach/attach to happen in the same set of
624 * supervisor interrupts, so it would make sense to execute this
625 * (full power down?) script after all the detach phases of the
626 * supervisor handling. like with training if needed from the
627 * second supervisor, nvidia doesn't do this, so who knows if it's
628 * entirely safe, but it does appear to work..
630 * without this script being run, on some configurations i've
631 * seen, switching from DP to TMDS on a DP connector may result
632 * in a blank screen (SOR_PWR off/on can restore it)
634 if (outp && outp->info.type == DCB_OUTPUT_DP) {
635 struct nvkm_output_dp *outpdp = nvkm_output_dp(outp);
636 struct nvbios_init init = {
638 .bios = subdev->device->bios,
/* script[4] is the DP power-down script in the DCB output table. */
641 .offset = outpdp->info.script[4],
/* quiesce the hotplug/IRQ notifier and mark link training undone so
 * the next attach retrains from scratch. */
645 nvkm_notify_put(&outpdp->irq);
647 atomic_set(&outpdp->lt.done, 0);
/* First supervisor stage for one head: execute VBIOS output script 1
 * (pre-modeset/blank) for whatever output currently drives the head. */
static void
nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
{
	exec_script(disp, head, 1);
}
/* nv50_disp_super(): workqueue handler for the three-stage supervisor
 * (modeset) interrupt sequence.  disp->super holds which stage fired
 * (0x10/0x20/0x40); the 0x610030 register says which heads take part.
 * Acknowledges the supervisor at the end by writing 0x80000000.
 *
 * NOTE(review): extraction artifact — stray leading line numbers and
 * missing lines (the `continue;` bodies of the skip-checks and the
 * closing braces).  Restore from the upstream nouveau driver before
 * building.
 */
658 nv50_disp_super(struct work_struct *work)
660 struct nv50_disp *disp =
661 container_of(work, struct nv50_disp, supervisor);
662 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
663 struct nvkm_device *device = subdev->device;
664 struct nvkm_head *head;
665 u32 super = nvkm_rd32(device, 0x610030);
667 nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super, super);
/* stage 1: dump core channel methods, run the pre-modeset script on
 * each participating head. */
669 if (disp->super & 0x00000010) {
670 nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
671 list_for_each_entry(head, &disp->base.head, head) {
672 if (!(super & (0x00000020 << head->id)))
674 if (!(super & (0x00000080 << head->id)))
676 nv50_disp_intr_unk10_0(disp, head->id);
/* stage 2: detach scripts, then PLLs, then attach/clock-routing. */
679 if (disp->super & 0x00000020) {
680 list_for_each_entry(head, &disp->base.head, head) {
681 if (!(super & (0x00000080 << head->id)))
683 nv50_disp_intr_unk20_0(disp, head->id);
685 list_for_each_entry(head, &disp->base.head, head) {
686 if (!(super & (0x00000200 << head->id)))
688 nv50_disp_intr_unk20_1(disp, head->id);
690 list_for_each_entry(head, &disp->base.head, head) {
691 if (!(super & (0x00000080 << head->id)))
693 nv50_disp_intr_unk20_2(disp, head->id);
/* stage 3: post-update scripts and SPPLL1 workaround. */
696 if (disp->super & 0x00000040) {
697 list_for_each_entry(head, &disp->base.head, head) {
698 if (!(super & (0x00000080 << head->id)))
700 nv50_disp_intr_unk40_0(disp, head->id);
702 nv50_disp_update_sppll1(disp);
/* ack the supervisor interrupt so hardware can continue. */
705 nvkm_wr32(device, 0x610030, 0x80000000);
708 static const struct nvkm_enum
709 nv50_disp_intr_error_type[] = {
710 { 3, "ILLEGAL_MTHD" },
711 { 4, "INVALID_VALUE" },
712 { 5, "INVALID_STATE" },
713 { 7, "INVALID_HANDLE" },
717 static const struct nvkm_enum
718 nv50_disp_intr_error_code[] = {
/* nv50_disp_intr_error(): decode and log a display channel exception for
 * channel `chid`, dump the offending channel's method state for method
 * errors, then ack the interrupt and clear the error latch.
 *
 * NOTE(review): extraction artifact — stray leading line numbers and
 * missing lines (the `switch (mthd)` dispatch around the mthd dump and
 * the closing braces).  Restore from the upstream nouveau driver before
 * building.
 */
724 nv50_disp_intr_error(struct nv50_disp *disp, int chid)
726 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
727 struct nvkm_device *device = subdev->device;
/* per-channel error latch: data word and address/type/code word. */
728 u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
729 u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
730 u32 code = (addr & 0x00ff0000) >> 16;
731 u32 type = (addr & 0x00007000) >> 12;
732 u32 mthd = (addr & 0x00000ffc);
733 const struct nvkm_enum *ec, *et;
735 et = nvkm_enum_find(nv50_disp_intr_error_type, type);
736 ec = nvkm_enum_find(nv50_disp_intr_error_code, code);
739 "ERROR %d [%s] %02x [%s] chid %d mthd %04x data %08x\n",
740 type, et ? et->name : "", code, ec ? ec->name : "",
/* for method errors on a known channel, dump its method state. */
743 if (chid < ARRAY_SIZE(disp->chan)) {
746 nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
/* ack the interrupt, then reset the error latch. */
753 nvkm_wr32(device, 0x610020, 0x00010000 << chid);
754 nvkm_wr32(device, 0x610080 + (chid * 0x08), 0x90000000);
758 nv50_disp_intr(struct nv50_disp *disp)
760 struct nvkm_device *device = disp->base.engine.subdev.device;
761 u32 intr0 = nvkm_rd32(device, 0x610020);
762 u32 intr1 = nvkm_rd32(device, 0x610024);
764 while (intr0 & 0x001f0000) {
765 u32 chid = __ffs(intr0 & 0x001f0000) - 16;
766 nv50_disp_intr_error(disp, chid);
767 intr0 &= ~(0x00010000 << chid);
770 while (intr0 & 0x0000001f) {
771 u32 chid = __ffs(intr0 & 0x0000001f);
772 nv50_disp_chan_uevent_send(disp, chid);
773 intr0 &= ~(0x00000001 << chid);
776 if (intr1 & 0x00000004) {
777 nvkm_disp_vblank(&disp->base, 0);
778 nvkm_wr32(device, 0x610024, 0x00000004);
781 if (intr1 & 0x00000008) {
782 nvkm_disp_vblank(&disp->base, 1);
783 nvkm_wr32(device, 0x610024, 0x00000008);
786 if (intr1 & 0x00000070) {
787 disp->super = (intr1 & 0x00000070);
788 schedule_work(&disp->supervisor);
789 nvkm_wr32(device, 0x610024, disp->super);
793 static const struct nv50_disp_func
795 .intr = nv50_disp_intr,
796 .uevent = &nv50_disp_chan_uevent,
797 .super = nv50_disp_super,
798 .root = &nv50_disp_root_oclass,
799 .head.new = nv50_head_new,
800 .outp.internal.crt = nv50_dac_output_new,
801 .outp.internal.tmds = nv50_sor_output_new,
802 .outp.internal.lvds = nv50_sor_output_new,
803 .outp.external.tmds = nv50_pior_output_new,
804 .outp.external.dp = nv50_pior_dp_new,
806 .dac.power = nv50_dac_power,
807 .dac.sense = nv50_dac_sense,
809 .sor.power = nv50_sor_power,
811 .pior.power = nv50_pior_power,
815 nv50_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
817 return nv50_disp_new_(&nv50_disp, device, index, 2, pdisp);