/*
 * Copyright 2015 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include "gf100.h"
#include "ctxgf100.h"

#include <subdev/fb.h>
#include <subdev/secboot.h>

#include <nvif/class.h>
/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/
37 gm200_gr_rops(struct gf100_gr *gr)
39 return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c);
43 gm200_gr_init_gpc_mmu(struct gf100_gr *gr)
45 struct nvkm_device *device = gr->base.engine.subdev.device;
47 nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf0001fff);
48 nvkm_wr32(device, 0x418890, 0x00000000);
49 nvkm_wr32(device, 0x418894, 0x00000000);
51 nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
52 nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
53 nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
57 gm200_gr_init(struct gf100_gr *gr)
59 struct nvkm_device *device = gr->base.engine.subdev.device;
60 struct nvkm_fb *fb = device->fb;
61 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
62 u32 data[TPC_MAX / 8] = {};
64 int gpc, tpc, ppc, rop;
67 /*XXX: belongs in fb */
68 nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->mmu_wr) >> 8);
69 nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->mmu_rd) >> 8);
70 nvkm_mask(device, 0x100cc4, 0x00040000, 0x00040000);
71 gr->func->init_gpc_mmu(gr);
73 gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
75 gm107_gr_init_bios(gr);
77 nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
79 memset(data, 0x00, sizeof(data));
80 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
81 for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
83 gpc = (gpc + 1) % gr->gpc_nr;
84 } while (!tpcnr[gpc]);
85 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
87 data[i / 8] |= tpc << ((i % 8) * 4);
90 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
91 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
92 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
93 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
95 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
96 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
97 gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
98 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
100 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
103 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
104 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
105 nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
107 nvkm_wr32(device, 0x400500, 0x00010001);
108 nvkm_wr32(device, 0x400100, 0xffffffff);
109 nvkm_wr32(device, 0x40013c, 0xffffffff);
110 nvkm_wr32(device, 0x400124, 0x00000002);
111 nvkm_wr32(device, 0x409c24, 0x000e0000);
112 nvkm_wr32(device, 0x405848, 0xc0000000);
113 nvkm_wr32(device, 0x40584c, 0x00000001);
114 nvkm_wr32(device, 0x404000, 0xc0000000);
115 nvkm_wr32(device, 0x404600, 0xc0000000);
116 nvkm_wr32(device, 0x408030, 0xc0000000);
117 nvkm_wr32(device, 0x404490, 0xc0000000);
118 nvkm_wr32(device, 0x406018, 0xc0000000);
119 nvkm_wr32(device, 0x407020, 0x40000000);
120 nvkm_wr32(device, 0x405840, 0xc0000000);
121 nvkm_wr32(device, 0x405844, 0x00ffffff);
122 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
124 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
125 for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++)
126 nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
127 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
128 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
129 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
130 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
131 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
132 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
133 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
134 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
135 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
136 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
137 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
138 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
139 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000005);
141 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
142 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
145 for (rop = 0; rop < gr->rop_nr; rop++) {
146 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
147 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
148 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
149 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
152 nvkm_wr32(device, 0x400108, 0xffffffff);
153 nvkm_wr32(device, 0x400138, 0xffffffff);
154 nvkm_wr32(device, 0x400118, 0xffffffff);
155 nvkm_wr32(device, 0x400130, 0xffffffff);
156 nvkm_wr32(device, 0x40011c, 0xffffffff);
157 nvkm_wr32(device, 0x400134, 0xffffffff);
159 nvkm_wr32(device, 0x400054, 0x2c350f63);
161 gf100_gr_zbc_init(gr);
163 return gf100_gr_init_ctxctl(gr);
167 gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
168 int index, struct nvkm_gr **pgr)
173 if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL)))
177 ret = gf100_gr_ctor(func, device, index, gr);
181 /* Load firmwares for non-secure falcons */
182 if (!nvkm_secboot_is_managed(device->secboot,
183 NVKM_SECBOOT_FALCON_FECS)) {
184 if ((ret = gf100_gr_ctor_fw(gr, "gr/fecs_inst", &gr->fuc409c)) ||
185 (ret = gf100_gr_ctor_fw(gr, "gr/fecs_data", &gr->fuc409d)))
188 if (!nvkm_secboot_is_managed(device->secboot,
189 NVKM_SECBOOT_FALCON_GPCCS)) {
190 if ((ret = gf100_gr_ctor_fw(gr, "gr/gpccs_inst", &gr->fuc41ac)) ||
191 (ret = gf100_gr_ctor_fw(gr, "gr/gpccs_data", &gr->fuc41ad)))
195 if ((ret = gk20a_gr_av_to_init(gr, "gr/sw_nonctx", &gr->fuc_sw_nonctx)) ||
196 (ret = gk20a_gr_aiv_to_init(gr, "gr/sw_ctx", &gr->fuc_sw_ctx)) ||
197 (ret = gk20a_gr_av_to_init(gr, "gr/sw_bundle_init", &gr->fuc_bundle)) ||
198 (ret = gk20a_gr_av_to_method(gr, "gr/sw_method_init", &gr->fuc_method)))
204 static const struct gf100_gr_func
206 .init = gm200_gr_init,
207 .init_gpc_mmu = gm200_gr_init_gpc_mmu,
208 .rops = gm200_gr_rops,
210 .grctx = &gm200_grctx,
212 { -1, -1, FERMI_TWOD_A },
213 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
214 { -1, -1, MAXWELL_B, &gf100_fermi },
215 { -1, -1, MAXWELL_COMPUTE_B },
221 gm200_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
223 return gm200_gr_new_(&gm200_gr, device, index, pgr);