]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/gpu/drm/nouveau/nv50_fifo.c
Merge branch '3.3-urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target...
[karo-tx-linux.git] / drivers / gpu / drm / nouveau / nv50_fifo.c
1 /*
2  * Copyright (C) 2007 Ben Skeggs.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining
6  * a copy of this software and associated documentation files (the
7  * "Software"), to deal in the Software without restriction, including
8  * without limitation the rights to use, copy, modify, merge, publish,
9  * distribute, sublicense, and/or sell copies of the Software, and to
10  * permit persons to whom the Software is furnished to do so, subject to
11  * the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the
14  * next paragraph) shall be included in all copies or substantial
15  * portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20  * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24  *
25  */
26
27 #include "drmP.h"
28 #include "drm.h"
29 #include "nouveau_drv.h"
30 #include "nouveau_ramht.h"
31 #include "nouveau_vm.h"
32
static void
nv50_fifo_playlist_update(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_gpuobj *cur;
	int i, nr;

	NV_DEBUG(dev, "\n");

	/* Playlists are double-buffered: build the new list in the buffer
	 * the hardware is not currently using, then flip. */
	cur = pfifo->playlist[pfifo->cur_playlist];
	pfifo->cur_playlist = !pfifo->cur_playlist;

	/* We never schedule channel 0 or 127 */
	for (i = 1, nr = 0; i < 127; i++) {
		if (dev_priv->channels.ptr[i] &&
		    dev_priv->channels.ptr[i]->ramfc) {
			/* One 32-bit entry per runnable channel id. */
			nv_wo32(cur, (nr * 4), i);
			nr++;
		}
	}
	dev_priv->engine.instmem.flush(dev);

	/* Point the hardware at the new playlist and kick the update.
	 * NOTE(review): registers 0x32f4 (playlist address >> 12),
	 * 0x32ec (entry count) and 0x2500 (trigger) are undocumented;
	 * semantics inferred from usage here — confirm against rnndb. */
	nv_wr32(dev, 0x32f4, cur->vinst >> 12);
	nv_wr32(dev, 0x32ec, nr);
	nv_wr32(dev, 0x2500, 0x101);
}
60
61 static void
62 nv50_fifo_channel_enable(struct drm_device *dev, int channel)
63 {
64         struct drm_nouveau_private *dev_priv = dev->dev_private;
65         struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
66         uint32_t inst;
67
68         NV_DEBUG(dev, "ch%d\n", channel);
69
70         if (dev_priv->chipset == 0x50)
71                 inst = chan->ramfc->vinst >> 12;
72         else
73                 inst = chan->ramfc->vinst >> 8;
74
75         nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst |
76                      NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
77 }
78
79 static void
80 nv50_fifo_channel_disable(struct drm_device *dev, int channel)
81 {
82         struct drm_nouveau_private *dev_priv = dev->dev_private;
83         uint32_t inst;
84
85         NV_DEBUG(dev, "ch%d\n", channel);
86
87         if (dev_priv->chipset == 0x50)
88                 inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
89         else
90                 inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
91         nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);
92 }
93
94 static void
95 nv50_fifo_init_reset(struct drm_device *dev)
96 {
97         uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;
98
99         NV_DEBUG(dev, "\n");
100
101         nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
102         nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |  pmc_e);
103 }
104
static void
nv50_fifo_init_intr(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	/* NV50 reuses the NV04 PFIFO interrupt handler, on IRQ source 8. */
	nouveau_irq_register(dev, 8, nv04_fifo_isr);
	/* Write all-ones to INTR_0 (presumably acks any pending
	 * interrupts), then unmask every interrupt source. */
	nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
}
114
115 static void
116 nv50_fifo_init_context_table(struct drm_device *dev)
117 {
118         struct drm_nouveau_private *dev_priv = dev->dev_private;
119         int i;
120
121         NV_DEBUG(dev, "\n");
122
123         for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
124                 if (dev_priv->channels.ptr[i])
125                         nv50_fifo_channel_enable(dev, i);
126                 else
127                         nv50_fifo_channel_disable(dev, i);
128         }
129
130         nv50_fifo_playlist_update(dev);
131 }
132
static void
nv50_fifo_init_regs__nv(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	/* Magic value for undocumented register 0x250c; the "__nv"
	 * suffix suggests it was mirrored from NVIDIA's binary driver. */
	nv_wr32(dev, 0x250c, 0x6f3cfc34);
}
140
static void
nv50_fifo_init_regs(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	/* Zero assorted (undocumented) PFIFO state registers. */
	nv_wr32(dev, 0x2500, 0);
	nv_wr32(dev, 0x3250, 0);
	nv_wr32(dev, 0x3220, 0);
	nv_wr32(dev, 0x3204, 0);
	nv_wr32(dev, 0x3210, 0);
	nv_wr32(dev, 0x3270, 0);
	/* Same value that nv50_fifo_create_context() seeds into each
	 * channel's RAMFC at offset 0x44 (restored via reg 0x2044). */
	nv_wr32(dev, 0x2044, 0x01003fff);

	/* Enable dummy channels setup by nv50_instmem.c */
	nv50_fifo_channel_enable(dev, 0);
	nv50_fifo_channel_enable(dev, 127);
}
158
159 int
160 nv50_fifo_init(struct drm_device *dev)
161 {
162         struct drm_nouveau_private *dev_priv = dev->dev_private;
163         struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
164         int ret;
165
166         NV_DEBUG(dev, "\n");
167
168         if (pfifo->playlist[0]) {
169                 pfifo->cur_playlist = !pfifo->cur_playlist;
170                 goto just_reset;
171         }
172
173         ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
174                                  NVOBJ_FLAG_ZERO_ALLOC,
175                                  &pfifo->playlist[0]);
176         if (ret) {
177                 NV_ERROR(dev, "error creating playlist 0: %d\n", ret);
178                 return ret;
179         }
180
181         ret = nouveau_gpuobj_new(dev, NULL, 128*4, 0x1000,
182                                  NVOBJ_FLAG_ZERO_ALLOC,
183                                  &pfifo->playlist[1]);
184         if (ret) {
185                 nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
186                 NV_ERROR(dev, "error creating playlist 1: %d\n", ret);
187                 return ret;
188         }
189
190 just_reset:
191         nv50_fifo_init_reset(dev);
192         nv50_fifo_init_intr(dev);
193         nv50_fifo_init_context_table(dev);
194         nv50_fifo_init_regs__nv(dev);
195         nv50_fifo_init_regs(dev);
196         dev_priv->engine.fifo.enable(dev);
197         dev_priv->engine.fifo.reassign(dev, true);
198
199         return 0;
200 }
201
void
nv50_fifo_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;

	NV_DEBUG(dev, "\n");

	/* Nothing to tear down if nv50_fifo_init() never ran. */
	if (!pfifo->playlist[0])
		return;

	/* Mask interrupts before removing the handler.  NOTE(review):
	 * 0x2140 is presumably PFIFO_INTR_EN_0, undoing the enable in
	 * nv50_fifo_init_intr() — confirm against register database. */
	nv_wr32(dev, 0x2140, 0x00000000);
	nouveau_irq_unregister(dev, 8);

	/* Drop our references; gpuobjs are freed when refcount hits 0. */
	nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
	nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
}
219
/* Return the id of the channel currently bound to PFIFO CACHE1,
 * read from the low bits of PFIFO_CACHE1_PUSH1. */
int
nv50_fifo_channel_id(struct drm_device *dev)
{
	return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
			NV50_PFIFO_CACHE1_PUSH1_CHID_MASK;
}
226
/* Create per-channel PFIFO state: RAMFC (the saved-register block) and
 * the CACHE1 spill buffer, map the channel's USER area, seed the
 * initial RAMFC contents, then enable and schedule the channel. */
int
nv50_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramfc = NULL;
	uint64_t ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
	unsigned long flags;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* On G80 (0x50), RAMFC and the cache buffer live at fixed offsets
	 * inside the channel's RAMIN block, so wrap them with "fake"
	 * gpuobjs; later chips allocate them as separate objects. */
	if (dev_priv->chipset == 0x50) {
		ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
					      chan->ramin->vinst, 0x100,
					      NVOBJ_FLAG_ZERO_ALLOC |
					      NVOBJ_FLAG_ZERO_FREE,
					      &chan->ramfc);
		if (ret)
			return ret;

		ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst + 0x0400,
					      chan->ramin->vinst + 0x0400,
					      4096, 0, &chan->cache);
		if (ret)
			return ret;
	} else {
		ret = nouveau_gpuobj_new(dev, chan, 0x100, 256,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &chan->ramfc);
		if (ret)
			return ret;

		ret = nouveau_gpuobj_new(dev, chan, 4096, 1024,
					 0, &chan->cache);
		if (ret)
			return ret;
	}
	ramfc = chan->ramfc;

	/* Map this channel's USER control area (BAR0) for the CPU.
	 * NOTE(review): error returns after chan->ramfc/cache were
	 * allocated do not free them here — presumably the caller's
	 * destroy path cleans up; verify. */
	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
			     NV50_USER(chan->id), PAGE_SIZE);
	if (!chan->user)
		return -ENOMEM;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	/* Seed initial RAMFC contents.  Offsets correspond to the
	 * register save/restore tables in nv50_fifo_load_context() /
	 * nv50_fifo_unload_context(). */
	nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
	nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
			     (4 << 24) /* SEARCH_FULL */ |
			     (chan->ramht->gpuobj->cinst >> 4));
	nv_wo32(ramfc, 0x44, 0x01003fff);
	nv_wo32(ramfc, 0x60, 0x7fffffff);
	nv_wo32(ramfc, 0x40, 0x00000000);
	nv_wo32(ramfc, 0x7c, 0x30000001);
	nv_wo32(ramfc, 0x78, 0x00000000);
	nv_wo32(ramfc, 0x3c, 0x403f6078);
	/* Indirect (IB) pushbuffer base, split into low/high words, plus
	 * the log2-encoded ring size in the upper word's bits 16+. */
	nv_wo32(ramfc, 0x50, lower_32_bits(ib_offset));
	nv_wo32(ramfc, 0x54, upper_32_bits(ib_offset) |
		drm_order(chan->dma.ib_max + 1) << 16);

	/* G84+ additionally record channel id and instance addresses. */
	if (dev_priv->chipset != 0x50) {
		nv_wo32(chan->ramin, 0, chan->id);
		nv_wo32(chan->ramin, 4, chan->ramfc->vinst >> 8);

		nv_wo32(ramfc, 0x88, chan->cache->vinst >> 10);
		nv_wo32(ramfc, 0x98, chan->ramin->vinst >> 12);
	}

	dev_priv->engine.instmem.flush(dev);

	/* Make the channel visible to the scheduler. */
	nv50_fifo_channel_enable(dev, chan->id);
	nv50_fifo_playlist_update(dev);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
	return 0;
}
303
/* Tear down a channel's PFIFO state: unload it if active, remove it
 * from the context table and playlist, then release its resources. */
void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_gpuobj *ramfc = NULL;
	unsigned long flags;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	pfifo->reassign(dev, false);

	/* Unload the context if it's the currently active one */
	if (pfifo->channel_id(dev) == chan->id) {
		pfifo->disable(dev);
		pfifo->unload_context(dev);
		pfifo->enable(dev);
	}

	/* This will ensure the channel is seen as disabled. */
	/* Hold a local reference to RAMFC before clearing chan->ramfc:
	 * the playlist update below skips channels without ->ramfc, and
	 * the object itself is only released after the lock is dropped. */
	nouveau_gpuobj_ref(chan->ramfc, &ramfc);
	nouveau_gpuobj_ref(NULL, &chan->ramfc);
	nv50_fifo_channel_disable(dev, chan->id);

	/* Dummy channel, also used on ch 127 */
	if (chan->id == 0)
		nv50_fifo_channel_disable(dev, 127);
	nv50_fifo_playlist_update(dev);

	pfifo->reassign(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Free the channel resources */
	if (chan->user) {
		iounmap(chan->user);
		chan->user = NULL;
	}
	nouveau_gpuobj_ref(NULL, &ramfc);
	nouveau_gpuobj_ref(NULL, &chan->cache);
}
346
/* Restore a channel's PFIFO state from its RAMFC block and re-bind
 * CACHE1 to it.  The register<->RAMFC-offset pairs below are the exact
 * mirror of the save sequence in nv50_fifo_unload_context(). */
int
nv50_fifo_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramfc = chan->ramfc;
	struct nouveau_gpuobj *cache = chan->cache;
	int ptr, cnt;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	nv_wr32(dev, 0x3330, nv_ro32(ramfc, 0x00));
	nv_wr32(dev, 0x3334, nv_ro32(ramfc, 0x04));
	nv_wr32(dev, 0x3240, nv_ro32(ramfc, 0x08));
	nv_wr32(dev, 0x3320, nv_ro32(ramfc, 0x0c));
	nv_wr32(dev, 0x3244, nv_ro32(ramfc, 0x10));
	nv_wr32(dev, 0x3328, nv_ro32(ramfc, 0x14));
	nv_wr32(dev, 0x3368, nv_ro32(ramfc, 0x18));
	nv_wr32(dev, 0x336c, nv_ro32(ramfc, 0x1c));
	nv_wr32(dev, 0x3370, nv_ro32(ramfc, 0x20));
	nv_wr32(dev, 0x3374, nv_ro32(ramfc, 0x24));
	nv_wr32(dev, 0x3378, nv_ro32(ramfc, 0x28));
	nv_wr32(dev, 0x337c, nv_ro32(ramfc, 0x2c));
	nv_wr32(dev, 0x3228, nv_ro32(ramfc, 0x30));
	nv_wr32(dev, 0x3364, nv_ro32(ramfc, 0x34));
	nv_wr32(dev, 0x32a0, nv_ro32(ramfc, 0x38));
	nv_wr32(dev, 0x3224, nv_ro32(ramfc, 0x3c));
	nv_wr32(dev, 0x324c, nv_ro32(ramfc, 0x40));
	nv_wr32(dev, 0x2044, nv_ro32(ramfc, 0x44));
	nv_wr32(dev, 0x322c, nv_ro32(ramfc, 0x48));
	nv_wr32(dev, 0x3234, nv_ro32(ramfc, 0x4c));
	nv_wr32(dev, 0x3340, nv_ro32(ramfc, 0x50));
	nv_wr32(dev, 0x3344, nv_ro32(ramfc, 0x54));
	nv_wr32(dev, 0x3280, nv_ro32(ramfc, 0x58));
	nv_wr32(dev, 0x3254, nv_ro32(ramfc, 0x5c));
	nv_wr32(dev, 0x3260, nv_ro32(ramfc, 0x60));
	nv_wr32(dev, 0x3264, nv_ro32(ramfc, 0x64));
	nv_wr32(dev, 0x3268, nv_ro32(ramfc, 0x68));
	nv_wr32(dev, 0x326c, nv_ro32(ramfc, 0x6c));
	nv_wr32(dev, 0x32e4, nv_ro32(ramfc, 0x70));
	nv_wr32(dev, 0x3248, nv_ro32(ramfc, 0x74));
	nv_wr32(dev, 0x2088, nv_ro32(ramfc, 0x78));
	nv_wr32(dev, 0x2058, nv_ro32(ramfc, 0x7c));
	nv_wr32(dev, 0x2210, nv_ro32(ramfc, 0x80));

	/* Replay the CACHE1 method/data pairs spilled by the unload
	 * path; RAMFC+0x84 holds the pair count. */
	cnt = nv_ro32(ramfc, 0x84);
	for (ptr = 0; ptr < cnt; ptr++) {
		nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
			nv_ro32(cache, (ptr * 8) + 0));
		nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
			nv_ro32(cache, (ptr * 8) + 4));
	}
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);

	/* guessing that all the 0x34xx regs aren't on NV50 */
	if (dev_priv->chipset != 0x50) {
		nv_wr32(dev, 0x340c, nv_ro32(ramfc, 0x88));
		nv_wr32(dev, 0x3400, nv_ro32(ramfc, 0x8c));
		nv_wr32(dev, 0x3404, nv_ro32(ramfc, 0x90));
		nv_wr32(dev, 0x3408, nv_ro32(ramfc, 0x94));
		nv_wr32(dev, 0x3410, nv_ro32(ramfc, 0x98));
	}

	/* Bind CACHE1 to this channel.  NOTE(review): meaning of bit 16
	 * in PUSH1 is not shown here — confirm against rnndb. */
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
	return 0;
}
414
/* Save the currently-active channel's PFIFO state into its RAMFC block
 * and spill pending CACHE1 entries, then park CACHE1 on the dummy
 * channel (127).  Returns 0 if no real channel was active, -EINVAL if
 * the hardware channel id has no software channel, else 0 on success. */
int
nv50_fifo_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_gpuobj *ramfc, *cache;
	struct nouveau_channel *chan = NULL;
	int chid, get, put, ptr;

	NV_DEBUG(dev, "\n");

	/* Channels 0 and 127 are dummies — nothing to save for them. */
	chid = pfifo->channel_id(dev);
	if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
		return 0;

	chan = dev_priv->channels.ptr[chid];
	if (!chan) {
		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
		return -EINVAL;
	}
	NV_DEBUG(dev, "ch%d\n", chan->id);
	ramfc = chan->ramfc;
	cache = chan->cache;

	/* Register save table — exact mirror of nv50_fifo_load_context(). */
	nv_wo32(ramfc, 0x00, nv_rd32(dev, 0x3330));
	nv_wo32(ramfc, 0x04, nv_rd32(dev, 0x3334));
	nv_wo32(ramfc, 0x08, nv_rd32(dev, 0x3240));
	nv_wo32(ramfc, 0x0c, nv_rd32(dev, 0x3320));
	nv_wo32(ramfc, 0x10, nv_rd32(dev, 0x3244));
	nv_wo32(ramfc, 0x14, nv_rd32(dev, 0x3328));
	nv_wo32(ramfc, 0x18, nv_rd32(dev, 0x3368));
	nv_wo32(ramfc, 0x1c, nv_rd32(dev, 0x336c));
	nv_wo32(ramfc, 0x20, nv_rd32(dev, 0x3370));
	nv_wo32(ramfc, 0x24, nv_rd32(dev, 0x3374));
	nv_wo32(ramfc, 0x28, nv_rd32(dev, 0x3378));
	nv_wo32(ramfc, 0x2c, nv_rd32(dev, 0x337c));
	nv_wo32(ramfc, 0x30, nv_rd32(dev, 0x3228));
	nv_wo32(ramfc, 0x34, nv_rd32(dev, 0x3364));
	nv_wo32(ramfc, 0x38, nv_rd32(dev, 0x32a0));
	nv_wo32(ramfc, 0x3c, nv_rd32(dev, 0x3224));
	nv_wo32(ramfc, 0x40, nv_rd32(dev, 0x324c));
	nv_wo32(ramfc, 0x44, nv_rd32(dev, 0x2044));
	nv_wo32(ramfc, 0x48, nv_rd32(dev, 0x322c));
	nv_wo32(ramfc, 0x4c, nv_rd32(dev, 0x3234));
	nv_wo32(ramfc, 0x50, nv_rd32(dev, 0x3340));
	nv_wo32(ramfc, 0x54, nv_rd32(dev, 0x3344));
	nv_wo32(ramfc, 0x58, nv_rd32(dev, 0x3280));
	nv_wo32(ramfc, 0x5c, nv_rd32(dev, 0x3254));
	nv_wo32(ramfc, 0x60, nv_rd32(dev, 0x3260));
	nv_wo32(ramfc, 0x64, nv_rd32(dev, 0x3264));
	nv_wo32(ramfc, 0x68, nv_rd32(dev, 0x3268));
	nv_wo32(ramfc, 0x6c, nv_rd32(dev, 0x326c));
	nv_wo32(ramfc, 0x70, nv_rd32(dev, 0x32e4));
	nv_wo32(ramfc, 0x74, nv_rd32(dev, 0x3248));
	nv_wo32(ramfc, 0x78, nv_rd32(dev, 0x2088));
	nv_wo32(ramfc, 0x7c, nv_rd32(dev, 0x2058));
	nv_wo32(ramfc, 0x80, nv_rd32(dev, 0x2210));

	/* Drain unprocessed CACHE1 method/data pairs (between GET and
	 * PUT) into the channel's cache buffer, 8 bytes per pair. */
	put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
	get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
	ptr = 0;
	while (put != get) {
		nv_wo32(cache, ptr + 0,
			nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
		nv_wo32(cache, ptr + 4,
			nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
		get = (get + 1) & 0x1ff;
		ptr += 8;
	}

	/* guessing that all the 0x34xx regs aren't on NV50 */
	if (dev_priv->chipset != 0x50) {
		/* Pair count consumed by nv50_fifo_load_context(). */
		nv_wo32(ramfc, 0x84, ptr >> 3);
		nv_wo32(ramfc, 0x88, nv_rd32(dev, 0x340c));
		nv_wo32(ramfc, 0x8c, nv_rd32(dev, 0x3400));
		nv_wo32(ramfc, 0x90, nv_rd32(dev, 0x3404));
		nv_wo32(ramfc, 0x94, nv_rd32(dev, 0x3408));
		nv_wo32(ramfc, 0x98, nv_rd32(dev, 0x3410));
	}

	dev_priv->engine.instmem.flush(dev);

	/*XXX: probably reload ch127 (NULL) state back too */
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
	return 0;
}
501
void
nv50_fifo_tlb_flush(struct drm_device *dev)
{
	/* Flush the VM TLBs for engine 5 — presumably PFIFO in the NV50
	 * VM engine numbering (given this file); confirm in nv50_vm.c. */
	nv50_vm_flush_engine(dev, 5);
}