]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/gpu/drm/i915/i915_dma.c
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/vegard...
[karo-tx-linux.git] / drivers / gpu / drm / i915 / i915_dma.c
1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #include "drmP.h"
30 #include "drm.h"
31 #include "drm_crtc_helper.h"
32 #include "drm_fb_helper.h"
33 #include "intel_drv.h"
34 #include "i915_drm.h"
35 #include "i915_drv.h"
36 #include <linux/vgaarb.h>
37
/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
/*
 * i915_wait_ring - poll until at least @n bytes of ring space are free.
 * @dev: DRM device
 * @n: bytes of ring space required
 * @caller: caller name for diagnostics (currently unused in this body)
 *
 * Returns 0 once enough space is free, -EBUSY if the ring makes no
 * progress for the full timeout.  The iteration counter is reset to 0
 * whenever HEAD or ACTHD moves, so only a genuine stall times out.
 */
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	/* 965 moved the active-head register */
	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
	u32 last_acthd = I915_READ(acthd_reg);
	u32 acthd;
	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 100000; i++) {
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		acthd = I915_READ(acthd_reg);
		/* free space wraps modulo the ring size; keep 8 bytes slack */
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		/* Record the wait in the sarea perf boxes for userspace stats */
		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}


		/* Any forward progress restarts the timeout */
		if (ring->head != last_head)
			i = 0;
		if (acthd != last_acthd)
			i = 0;

		last_head = ring->head;
		last_acthd = acthd;
		msleep_interruptible(10);

	}

	return -EBUSY;
}
82
83 /* As a ringbuffer is only allowed to wrap between instructions, fill
84  * the tail with NOOPs.
85  */
86 int i915_wrap_ring(struct drm_device *dev)
87 {
88         drm_i915_private_t *dev_priv = dev->dev_private;
89         volatile unsigned int *virt;
90         int rem;
91
92         rem = dev_priv->ring.Size - dev_priv->ring.tail;
93         if (dev_priv->ring.space < rem) {
94                 int ret = i915_wait_ring(dev, rem, __func__);
95                 if (ret)
96                         return ret;
97         }
98         dev_priv->ring.space -= rem;
99
100         virt = (unsigned int *)
101                 (dev_priv->ring.virtual_start + dev_priv->ring.tail);
102         rem /= 4;
103         while (rem--)
104                 *virt++ = MI_NOOP;
105
106         dev_priv->ring.tail = 0;
107
108         return 0;
109 }
110
111 /**
112  * Sets up the hardware status page for devices that need a physical address
113  * in the register.
114  */
115 static int i915_init_phys_hws(struct drm_device *dev)
116 {
117         drm_i915_private_t *dev_priv = dev->dev_private;
118         /* Program Hardware Status Page */
119         dev_priv->status_page_dmah =
120                 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
121
122         if (!dev_priv->status_page_dmah) {
123                 DRM_ERROR("Can not allocate hardware status page\n");
124                 return -ENOMEM;
125         }
126         dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
127         dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
128
129         memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
130
131         I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
132         DRM_DEBUG_DRIVER("Enabled hardware status page\n");
133         return 0;
134 }
135
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Physically-addressed page allocated by i915_init_phys_hws() */
	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	/* GFX-addressed mapping set up via the HWS_ADDR ioctl */
	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
156
/*
 * Refresh the software ring-buffer state (head, tail, free space) from the
 * hardware registers, and note an empty ring in the sarea perf boxes.
 * No-op under kernel modesetting, where the ring is not shared with
 * userspace.
 */
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	/* Re-read ring state from the hardware; keep 8 bytes of slack */
	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
183
/*
 * Tear down the legacy DMA state: disable interrupts, unmap the ring
 * buffer mapping, and release the hardware status page on parts that use
 * a GFX-addressed one.  Always returns 0.
 */
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	/* Unmap the ring and reset the software mapping state */
	if (dev_priv->ring.virtual_start) {
		drm_core_ioremapfree(&dev_priv->ring.map, dev);
		dev_priv->ring.virtual_start = NULL;
		dev_priv->ring.map.handle = NULL;
		dev_priv->ring.map.size = 0;
	}

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}
207
/*
 * First half of the I915_INIT_DMA ioctl: locate the sarea (legacy/DRI1
 * userspace only), optionally map the client-supplied ring buffer, and
 * record the buffer-layout parameters from @init.
 */
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		/* GEM owns the ring; a client-supplied ring is a protocol error */
		if (dev_priv->ring.ring_obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		dev_priv->ring.Size = init->ring_size;

		dev_priv->ring.map.offset = init->ring_start;
		dev_priv->ring.map.size = init->ring_size;
		dev_priv->ring.map.type = 0;
		dev_priv->ring.map.flags = 0;
		dev_priv->ring.map.mtrr = 0;

		/* Map the ring write-combined for CPU command emission */
		drm_core_ioremap_wc(&dev_priv->ring.map, dev);

		if (dev_priv->ring.map.handle == NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("can not ioremap virtual address for"
				  " ring buffer\n");
			return -ENOMEM;
		}
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	/* Record the framebuffer layout userspace described */
	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}
262
263 static int i915_dma_resume(struct drm_device * dev)
264 {
265         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
266
267         DRM_DEBUG_DRIVER("%s\n", __func__);
268
269         if (dev_priv->ring.map.handle == NULL) {
270                 DRM_ERROR("can not ioremap virtual address for"
271                           " ring buffer\n");
272                 return -ENOMEM;
273         }
274
275         /* Program Hardware Status Page */
276         if (!dev_priv->hw_status_page) {
277                 DRM_ERROR("Can not find hardware status page\n");
278                 return -EINVAL;
279         }
280         DRM_DEBUG_DRIVER("hw status page @ %p\n",
281                                 dev_priv->hw_status_page);
282
283         if (dev_priv->status_gfx_addr != 0)
284                 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
285         else
286                 I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
287         DRM_DEBUG_DRIVER("Enabled hardware status page\n");
288
289         return 0;
290 }
291
292 static int i915_dma_init(struct drm_device *dev, void *data,
293                          struct drm_file *file_priv)
294 {
295         drm_i915_init_t *init = data;
296         int retcode = 0;
297
298         switch (init->func) {
299         case I915_INIT_DMA:
300                 retcode = i915_initialize(dev, init);
301                 break;
302         case I915_CLEANUP_DMA:
303                 retcode = i915_dma_cleanup(dev);
304                 break;
305         case I915_RESUME_DMA:
306                 retcode = i915_dma_resume(dev);
307                 break;
308         default:
309                 retcode = -EINVAL;
310                 break;
311         }
312
313         return retcode;
314 }
315
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * The return value is the length in dwords of the command starting at
 * *cmd, which tells the scanner where the next command to check begins.
 * A length of zero marks an illegal command and aborts the rest of the
 * buffer.
 */
static int do_validate_cmd(int cmd)
{
	unsigned int client = ((unsigned int)cmd >> 29) & 0x7;

	switch (client) {
	case 0x0:	/* MI client */
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:	/* MI_NOOP */
		case 0x4:	/* MI_FLUSH */
			return 1;
		default:
			return 0;	/* disallow everything else */
		}
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		/* Single-dword 3D state commands */
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			return (cmd & (1 << 23)) ? (cmd & 0xffff) + 1 : 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			if (cmd & (1 << 17)) {		/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				return (((cmd & 0xffff) + 1) / 2) + 1;
			}
			return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}
}
382
/* Thin wrapper around do_validate_cmd(), kept as a hook point for
 * debug tracing of the command validator. */
static int validate_cmd(int cmd)
{
	return do_validate_cmd(cmd);
}
391
/*
 * Validate and emit a command stream into the ring.
 * @buffer: dword array already copied into kernel memory
 * @dwords: number of dwords in @buffer
 *
 * Each command is length-checked by validate_cmd() before emission; a
 * zero length aborts with -EINVAL.  Emission is padded to an even number
 * of dwords.
 */
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	/* Refuse streams that could not fit even in an empty ring
	 * (8 bytes of slack are always kept free) */
	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return -EINVAL;

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		cmd = buffer[i];

		/* NOTE(review): returning here leaves the BEGIN_LP_RING
		 * sequence un-advanced; historical behaviour, kept as-is. */
		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		/* Emit the remaining dwords of the current command */
		while (++i, --sz) {
			OUT_RING(buffer[i]);
		}
	}

	/* Pad to qword alignment */
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
425
/*
 * Emit a GFX_OP_DRAWRECT_INFO packet programming cliprect @i of @boxes as
 * the hardware drawing rectangle.  Returns -EINVAL for an empty or
 * inverted box, 0 on success.
 */
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *boxes,
	      int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect box = boxes[i];
	RING_LOCALS;

	/* Reject degenerate rectangles */
	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return -EINVAL;
	}

	if (IS_I965G(dev)) {
		/* 965 uses a shorter packet without DR1 or trailing pad */
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}
461
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

/*
 * Emit a breadcrumb: advance the software frame counter (wrapping to 0
 * above 0x7FFFFFFF), mirror it into the sarea, and store it into the
 * hardware status page with MI_STORE_DWORD_INDEX so completion can be
 * observed via READ_BREADCRUMB.
 */
static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	RING_LOCALS;

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
}
485
486 static int i915_dispatch_cmdbuffer(struct drm_device * dev,
487                                    drm_i915_cmdbuffer_t *cmd,
488                                    struct drm_clip_rect *cliprects,
489                                    void *cmdbuf)
490 {
491         int nbox = cmd->num_cliprects;
492         int i = 0, count, ret;
493
494         if (cmd->sz & 0x3) {
495                 DRM_ERROR("alignment");
496                 return -EINVAL;
497         }
498
499         i915_kernel_lost_context(dev);
500
501         count = nbox ? nbox : 1;
502
503         for (i = 0; i < count; i++) {
504                 if (i < nbox) {
505                         ret = i915_emit_box(dev, cliprects, i,
506                                             cmd->DR1, cmd->DR4);
507                         if (ret)
508                                 return ret;
509                 }
510
511                 ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
512                 if (ret)
513                         return ret;
514         }
515
516         i915_emit_breadcrumb(dev);
517         return 0;
518 }
519
/*
 * Dispatch a userspace batchbuffer: once per cliprect (or once when there
 * are none), program the cliprect then jump to the batch in non-secure
 * mode with MI_BATCH_BUFFER_START, or the older MI_BATCH_BUFFER on
 * 830/845.  Finishes with a breadcrumb.
 */
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	/* Batch start address and length must be qword aligned */
	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	/* No cliprects still means one pass over the batch */
	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				/* 965 carries the non-secure bit in the command dword */
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				/* pre-965 carries it in the start address */
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		} else {
			/* 830/845: MI_BATCH_BUFFER takes explicit start and end */
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);

	return 0;
}
570
/*
 * Emit the command sequence for a legacy page flip: a render flush, an
 * async CMD_OP_DISPLAYBUFFER_INFO pointing at the non-current buffer, a
 * wait for the plane-A flip, and a breadcrumb.  Toggles
 * dev_priv->current_page and mirrors the state into the sarea.
 */
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	RING_LOCALS;

	/* Flipping is coordinated through the sarea; require one */
	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			  __func__,
			 dev_priv->current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	/* Flush render caches before switching the display base */
	BEGIN_LP_RING(2);
	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	/* Point the display at whichever buffer is not current */
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Stall the ring until the flip actually happens */
	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	/* Breadcrumb into the status page, as in i915_emit_breadcrumb() */
	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}
623
624 static int i915_quiescent(struct drm_device * dev)
625 {
626         drm_i915_private_t *dev_priv = dev->dev_private;
627
628         i915_kernel_lost_context(dev);
629         return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
630 }
631
632 static int i915_flush_ioctl(struct drm_device *dev, void *data,
633                             struct drm_file *file_priv)
634 {
635         int ret;
636
637         RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
638
639         mutex_lock(&dev->struct_mutex);
640         ret = i915_quiescent(dev);
641         mutex_unlock(&dev->struct_mutex);
642
643         return ret;
644 }
645
646 static int i915_batchbuffer(struct drm_device *dev, void *data,
647                             struct drm_file *file_priv)
648 {
649         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
650         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
651         drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
652             master_priv->sarea_priv;
653         drm_i915_batchbuffer_t *batch = data;
654         int ret;
655         struct drm_clip_rect *cliprects = NULL;
656
657         if (!dev_priv->allow_batchbuffer) {
658                 DRM_ERROR("Batchbuffer ioctl disabled\n");
659                 return -EINVAL;
660         }
661
662         DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
663                         batch->start, batch->used, batch->num_cliprects);
664
665         RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
666
667         if (batch->num_cliprects < 0)
668                 return -EINVAL;
669
670         if (batch->num_cliprects) {
671                 cliprects = kcalloc(batch->num_cliprects,
672                                     sizeof(struct drm_clip_rect),
673                                     GFP_KERNEL);
674                 if (cliprects == NULL)
675                         return -ENOMEM;
676
677                 ret = copy_from_user(cliprects, batch->cliprects,
678                                      batch->num_cliprects *
679                                      sizeof(struct drm_clip_rect));
680                 if (ret != 0)
681                         goto fail_free;
682         }
683
684         mutex_lock(&dev->struct_mutex);
685         ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
686         mutex_unlock(&dev->struct_mutex);
687
688         if (sarea_priv)
689                 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
690
691 fail_free:
692         kfree(cliprects);
693
694         return ret;
695 }
696
697 static int i915_cmdbuffer(struct drm_device *dev, void *data,
698                           struct drm_file *file_priv)
699 {
700         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
701         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
702         drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
703             master_priv->sarea_priv;
704         drm_i915_cmdbuffer_t *cmdbuf = data;
705         struct drm_clip_rect *cliprects = NULL;
706         void *batch_data;
707         int ret;
708
709         DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
710                         cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
711
712         RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
713
714         if (cmdbuf->num_cliprects < 0)
715                 return -EINVAL;
716
717         batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
718         if (batch_data == NULL)
719                 return -ENOMEM;
720
721         ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
722         if (ret != 0)
723                 goto fail_batch_free;
724
725         if (cmdbuf->num_cliprects) {
726                 cliprects = kcalloc(cmdbuf->num_cliprects,
727                                     sizeof(struct drm_clip_rect), GFP_KERNEL);
728                 if (cliprects == NULL)
729                         goto fail_batch_free;
730
731                 ret = copy_from_user(cliprects, cmdbuf->cliprects,
732                                      cmdbuf->num_cliprects *
733                                      sizeof(struct drm_clip_rect));
734                 if (ret != 0)
735                         goto fail_clip_free;
736         }
737
738         mutex_lock(&dev->struct_mutex);
739         ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
740         mutex_unlock(&dev->struct_mutex);
741         if (ret) {
742                 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
743                 goto fail_clip_free;
744         }
745
746         if (sarea_priv)
747                 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
748
749 fail_clip_free:
750         kfree(cliprects);
751 fail_batch_free:
752         kfree(batch_data);
753
754         return ret;
755 }
756
757 static int i915_flip_bufs(struct drm_device *dev, void *data,
758                           struct drm_file *file_priv)
759 {
760         int ret;
761
762         DRM_DEBUG_DRIVER("%s\n", __func__);
763
764         RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
765
766         mutex_lock(&dev->struct_mutex);
767         ret = i915_dispatch_flip(dev);
768         mutex_unlock(&dev->struct_mutex);
769
770         return ret;
771 }
772
773 static int i915_getparam(struct drm_device *dev, void *data,
774                          struct drm_file *file_priv)
775 {
776         drm_i915_private_t *dev_priv = dev->dev_private;
777         drm_i915_getparam_t *param = data;
778         int value;
779
780         if (!dev_priv) {
781                 DRM_ERROR("called with no initialization\n");
782                 return -EINVAL;
783         }
784
785         switch (param->param) {
786         case I915_PARAM_IRQ_ACTIVE:
787                 value = dev->pdev->irq ? 1 : 0;
788                 break;
789         case I915_PARAM_ALLOW_BATCHBUFFER:
790                 value = dev_priv->allow_batchbuffer ? 1 : 0;
791                 break;
792         case I915_PARAM_LAST_DISPATCH:
793                 value = READ_BREADCRUMB(dev_priv);
794                 break;
795         case I915_PARAM_CHIPSET_ID:
796                 value = dev->pci_device;
797                 break;
798         case I915_PARAM_HAS_GEM:
799                 value = dev_priv->has_gem;
800                 break;
801         case I915_PARAM_NUM_FENCES_AVAIL:
802                 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
803                 break;
804         default:
805                 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
806                                         param->param);
807                 return -EINVAL;
808         }
809
810         if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
811                 DRM_ERROR("DRM_COPY_TO_USER failed\n");
812                 return -EFAULT;
813         }
814
815         return 0;
816 }
817
818 static int i915_setparam(struct drm_device *dev, void *data,
819                          struct drm_file *file_priv)
820 {
821         drm_i915_private_t *dev_priv = dev->dev_private;
822         drm_i915_setparam_t *param = data;
823
824         if (!dev_priv) {
825                 DRM_ERROR("called with no initialization\n");
826                 return -EINVAL;
827         }
828
829         switch (param->param) {
830         case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
831                 break;
832         case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
833                 dev_priv->tex_lru_log_granularity = param->value;
834                 break;
835         case I915_SETPARAM_ALLOW_BATCHBUFFER:
836                 dev_priv->allow_batchbuffer = param->value;
837                 break;
838         case I915_SETPARAM_NUM_USED_FENCES:
839                 if (param->value > dev_priv->num_fence_regs ||
840                     param->value < 0)
841                         return -EINVAL;
842                 /* Userspace can use first N regs */
843                 dev_priv->fence_reg_start = param->value;
844                 break;
845         default:
846                 DRM_DEBUG_DRIVER("unknown parameter %d\n",
847                                         param->param);
848                 return -EINVAL;
849         }
850
851         return 0;
852 }
853
/*
 * I915_HWS_ADDR ioctl: point the hardware status page at a GFX-addressed
 * page supplied by userspace (parts where I915_NEED_GFX_HWS is true).
 * Maps the page through the AGP aperture and programs HWS_PGA.
 */
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;

	/* Only parts that require a GFX-addressed HWS accept this ioctl */
	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	/* Under KMS the kernel owns the status page; ignore the request */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap_wc(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->hws_map.handle;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
				dev_priv->status_gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
				dev_priv->hw_status_page);
	return 0;
}
901
902 static int i915_get_bridge_dev(struct drm_device *dev)
903 {
904         struct drm_i915_private *dev_priv = dev->dev_private;
905
906         dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
907         if (!dev_priv->bridge_dev) {
908                 DRM_ERROR("bridge device not found\n");
909                 return -1;
910         }
911         return 0;
912 }
913
/**
 * i915_probe_agp - get AGP bootup configuration
 * @dev: DRM device
 * @aperture_size: returns AGP aperture configured size
 * @preallocated_size: returns size of BIOS preallocated AGP space
 *
 * Since Intel integrated graphics are UMA, the BIOS has to set aside
 * some RAM for the framebuffer at early boot.  This code figures out
 * how much was set aside so we can use it for our own purposes.
 */
static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
			  uint32_t *preallocated_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 tmp = 0;
	unsigned long overhead;
	unsigned long stolen;

	/* Get the fb aperture size and "stolen" memory amount. */
	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);

	/* Work in MB units below; defaults apply if no case matches. */
	*aperture_size = 1024 * 1024;
	*preallocated_size = 1024 * 1024;

	switch (dev->pdev->device) {
	/* Pre-9xx chips: aperture size is encoded in the GMCH control word. */
	case PCI_DEVICE_ID_INTEL_82830_CGC:
	case PCI_DEVICE_ID_INTEL_82845G_IG:
	case PCI_DEVICE_ID_INTEL_82855GM_IG:
	case PCI_DEVICE_ID_INTEL_82865_IG:
		if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
			*aperture_size *= 64;
		else
			*aperture_size *= 128;
		break;
	default:
		/* 9xx supports large sizes, just look at the length */
		*aperture_size = pci_resource_len(dev->pdev, 2);
		break;
	}

	/*
	 * Some of the preallocated space is taken by the GTT
	 * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
	 */
	if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev))
		overhead = 4096;
	else
		overhead = (*aperture_size / 1024) + 4096;

	/* Decode the BIOS-stolen memory size from the GMCH control word. */
	switch (tmp & INTEL_GMCH_GMS_MASK) {
	case INTEL_855_GMCH_GMS_DISABLED:
		DRM_ERROR("video memory is disabled\n");
		return -1;
	case INTEL_855_GMCH_GMS_STOLEN_1M:
		stolen = 1 * 1024 * 1024;
		break;
	case INTEL_855_GMCH_GMS_STOLEN_4M:
		stolen = 4 * 1024 * 1024;
		break;
	case INTEL_855_GMCH_GMS_STOLEN_8M:
		stolen = 8 * 1024 * 1024;
		break;
	case INTEL_855_GMCH_GMS_STOLEN_16M:
		stolen = 16 * 1024 * 1024;
		break;
	case INTEL_855_GMCH_GMS_STOLEN_32M:
		stolen = 32 * 1024 * 1024;
		break;
	case INTEL_915G_GMCH_GMS_STOLEN_48M:
		stolen = 48 * 1024 * 1024;
		break;
	case INTEL_915G_GMCH_GMS_STOLEN_64M:
		stolen = 64 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_128M:
		stolen = 128 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_256M:
		stolen = 256 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_96M:
		stolen = 96 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_160M:
		stolen = 160 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_224M:
		stolen = 224 * 1024 * 1024;
		break;
	case INTEL_GMCH_GMS_STOLEN_352M:
		stolen = 352 * 1024 * 1024;
		break;
	default:
		DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
			tmp & INTEL_GMCH_GMS_MASK);
		return -1;
	}
	/* What's left after GTT + popup overhead is usable stolen memory. */
	*preallocated_size = stolen - overhead;

	return 0;
}
1015
1016 /* true = enable decode, false = disable decoder */
1017 static unsigned int i915_vga_set_decode(void *cookie, bool state)
1018 {
1019         struct drm_device *dev = cookie;
1020
1021         intel_modeset_vga_set_state(dev, state);
1022         if (state)
1023                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1024                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1025         else
1026                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1027 }
1028
/*
 * One-time KMS bring-up: carve up the aperture between the stolen-memory
 * allocator and GEM, start the ring, register with the VGA arbiter,
 * install the IRQ handler, and probe outputs.
 *
 * @prealloc_size: bytes of BIOS-stolen memory (becomes the vram mm pool)
 * @agp_size: total aperture size in bytes
 *
 * Returns 0 on success or a negative errno; on failure the ringbuffer is
 * cleaned up but earlier, idempotent setup (mm init, BIOS parse) is not
 * unwound here — the caller's unload path handles full teardown.
 */
static int i915_load_modeset_init(struct drm_device *dev,
				  unsigned long prealloc_size,
				  unsigned long agp_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fb_bar = IS_I9XX(dev) ? 2 : 0;	/* framebuffer BAR differs pre-9xx */
	int ret = 0;

	/* Align the fb base down to 16MB; low bits of the BAR are not used. */
	dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
		0xff000000;

	/* Pick cursor placement per generation: mobile/9xx need physical
	 * addresses, but 965/G33 can take GTT addresses again. */
	if (IS_MOBILE(dev) || IS_I9XX(dev))
		dev_priv->cursor_needs_physical = true;
	else
		dev_priv->cursor_needs_physical = false;

	if (IS_I965G(dev) || IS_G33(dev))
		dev_priv->cursor_needs_physical = false;

	/* Basic memrange allocator for stolen space (aka vram) */
	drm_mm_init(&dev_priv->vram, 0, prealloc_size);

	/* Let GEM Manage from end of prealloc space to end of aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently
	 * prefetches past the end of the object, and we've seen multiple
	 * hangs with the GPU head pointer stuck in a batchbuffer bound
	 * at the last page of the aperture.  One page should be enough to
	 * keep any prefetching inside of the aperture.
	 */
	i915_gem_do_init(dev, prealloc_size, agp_size - 4096);

	ret = i915_gem_init_ringbuffer(dev);
	if (ret)
		goto out;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* VBIOS tables are optional; carry on without them. */
	ret = intel_init_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret)
		goto destroy_ringbuffer;

	ret = drm_irq_install(dev);
	if (ret)
		goto destroy_ringbuffer;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	/*
	 * Initialize the hardware status page IRQ location.
	 */

	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));

	/* Probe outputs and set an initial mode/framebuffer config. */
	intel_modeset_init(dev);

	drm_helper_initial_config(dev);

	return 0;

destroy_ringbuffer:
	i915_gem_cleanup_ringbuffer(dev);
out:
	return ret;
}
1104
1105 int i915_master_create(struct drm_device *dev, struct drm_master *master)
1106 {
1107         struct drm_i915_master_private *master_priv;
1108
1109         master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
1110         if (!master_priv)
1111                 return -ENOMEM;
1112
1113         master->driver_priv = master_priv;
1114         return 0;
1115 }
1116
1117 void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1118 {
1119         struct drm_i915_master_private *master_priv = master->driver_priv;
1120
1121         if (!master_priv)
1122                 return;
1123
1124         kfree(master_priv);
1125
1126         master->driver_priv = NULL;
1127 }
1128
/*
 * Read the FSB and memory clock frequencies from the CLKCFG register
 * and cache them in dev_priv.  IGD (Pineview-class) only; a no-op on
 * everything else.  Encodings not listed below leave the corresponding
 * frequency field untouched (i.e. 0 from the zeroed dev_priv).
 */
static void i915_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	if (!IS_IGD(dev))
		return;

	tmp = I915_READ(CLKCFG);

	/* Decode front-side bus frequency (MHz). */
	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq =  667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	/* Decode memory frequency (MHz). */
	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}
}
1166
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is unwound via the goto chain at the bottom.
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	resource_size_t base, size;
	/* MMIO lives in BAR 0 on 9xx, BAR 1 on older chips. */
	int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
	uint32_t agp_size, prealloc_size;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	/* Allocate our private state; zeroed so all flags start cleared. */
	dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;

	/* Add register map (needed for suspend/resume) */
	base = drm_get_resource_start(dev, mmio_bar);
	size = drm_get_resource_len(dev, mmio_bar);

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	dev_priv->regs = ioremap(base, size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	/* Write-combined CPU mapping of the whole GTT aperture for GEM. */
	dev_priv->mm.gtt_mapping =
		io_mapping_create_wc(dev->agp->base,
				     dev->agp->agp_info.aper_size * 1024*1024);
	if (dev_priv->mm.gtt_mapping == NULL) {
		ret = -EIO;
		goto out_rmmap;
	}

	/* Set up a WC MTRR for non-PAT systems.  This is more common than
	 * one would think, because the kernel disables PAT on first
	 * generation Core chips because WC PAT gets overridden by a UC
	 * MTRR if present.  Even if a UC MTRR isn't present.
	 */
	dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
					 dev->agp->agp_info.aper_size *
					 1024 * 1024,
					 MTRR_TYPE_WRCOMB, 1);
	if (dev_priv->mm.gtt_mtrr < 0) {
		/* Non-fatal: things still work, just slower. */
		DRM_INFO("MTRR allocation failed.  Graphics "
			 "performance may suffer.\n");
	}

	ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
	if (ret)
		goto out_iomapfree;

	/* Private workqueue for deferred work (e.g. hotplug, error handling). */
	dev_priv->wq = create_workqueue("i915");
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_iomapfree;
	}

	/* enable GEM by default */
	dev_priv->has_gem = 1;

	/* If the BIOS stole most of the aperture there isn't enough left
	 * for GEM to be useful; disable it rather than limp along. */
	if (prealloc_size > agp_size * 3 / 4) {
		DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
			  "memory stolen.\n",
			  prealloc_size / 1024, agp_size / 1024);
		DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
			  "updating the BIOS to fix).\n");
		dev_priv->has_gem = 0;
	}

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || IS_IGDNG(dev)) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	i915_gem_load(dev);

	/* Init HWS */
	if (!I915_NEED_GFX_HWS(dev)) {
		ret = i915_init_phys_hws(dev);
		if (ret != 0)
			goto out_workqueue_free;
	}

	i915_get_mem_freq(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	spin_lock_init(&dev_priv->user_irq_lock);
	spin_lock_init(&dev_priv->error_lock);
	dev_priv->user_irq_refcount = 0;

	ret = drm_vblank_init(dev, I915_NUM_PIPE);

	if (ret) {
		/* Unload tears down everything set up above. */
		(void) i915_driver_unload(dev);
		return ret;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_workqueue_free;
		}
	}

	/* Must be done after probing outputs */
	/* FIXME: verify on IGDNG */
	if (!IS_IGDNG(dev))
		intel_opregion_init(dev, 0);

	return 0;

	/* Error unwind: each label undoes one acquisition, in reverse order. */
out_workqueue_free:
	destroy_workqueue(dev_priv->wq);
out_iomapfree:
	io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
	iounmap(dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	kfree(dev_priv);
	return ret;
}
1330
/*
 * Tear down everything i915_driver_load() set up, in roughly reverse
 * order.  Also called from the load path's drm_vblank_init() failure
 * branch, so each step must tolerate partially-initialized state.
 * Always returns 0.
 */
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	destroy_workqueue(dev_priv->wq);

	io_mapping_free(dev_priv->mm.gtt_mapping);
	/* Only remove the MTRR if we actually got one at load time. */
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
			 dev->agp->agp_info.aper_size * 1024 * 1024);
		dev_priv->mm.gtt_mtrr = -1;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_irq_uninstall(dev);
		/* Unregister from the VGA arbiter. */
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	if (dev_priv->regs != NULL)
		iounmap(dev_priv->regs);

	if (!IS_IGDNG(dev))
		intel_opregion_free(dev, 0);

	/* KMS-only state: outputs, GEM objects, ring, stolen-memory pool. */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_modeset_cleanup(dev);

		i915_gem_free_all_phys_object(dev);

		mutex_lock(&dev->struct_mutex);
		i915_gem_cleanup_ringbuffer(dev);
		mutex_unlock(&dev->struct_mutex);
		drm_mm_takedown(&dev_priv->vram);
		i915_gem_lastclose(dev);
	}

	/* Drop the bridge reference taken in i915_get_bridge_dev(). */
	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev->dev_private);

	return 0;
}
1375
1376 int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1377 {
1378         struct drm_i915_file_private *i915_file_priv;
1379
1380         DRM_DEBUG_DRIVER("\n");
1381         i915_file_priv = (struct drm_i915_file_private *)
1382             kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
1383
1384         if (!i915_file_priv)
1385                 return -ENOMEM;
1386
1387         file_priv->driver_priv = i915_file_priv;
1388
1389         INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
1390
1391         return 0;
1392 }
1393
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the AGP
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* KMS: just restore the fbcon mode; the kernel keeps owning state.
	 * (Also bail if we were never initialized.) */
	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_helper_restore();
		return;
	}

	/* UMS: drop all GEM state held on behalf of clients. */
	i915_gem_lastclose(dev);

	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	i915_dma_cleanup(dev);
}
1422
1423 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1424 {
1425         drm_i915_private_t *dev_priv = dev->dev_private;
1426         i915_gem_release(dev, file_priv);
1427         if (!drm_core_check_feature(dev, DRIVER_MODESET))
1428                 i915_mem_release(dev, file_priv, dev_priv->agp_heap);
1429 }
1430
1431 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
1432 {
1433         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1434
1435         kfree(i915_file_priv);
1436 }
1437
/* Ioctl dispatch table.  Order must match the DRM_I915_* ioctl numbers.
 * DRM_AUTH = authenticated clients only; DRM_MASTER = DRM master only;
 * DRM_ROOT_ONLY = CAP_SYS_ADMIN; 0 = any client (the GEM object ioctls).
 */
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
};

/* Number of entries in the dispatch table above. */
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
1478
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}