/* drivers/gpu/drm/i915/i915_dma.c
 * Imported from the mv-sheeva.git tree (web export via git.karo-electronics.de).
 */
1 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #include "drmP.h"
30 #include "drm.h"
31 #include "drm_crtc_helper.h"
32 #include "intel_drv.h"
33 #include "i915_drm.h"
34 #include "i915_drv.h"
35
/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
/**
 * Wait until at least @n bytes are free in the ring buffer.
 *
 * Polls the ring head and the active-head register roughly every 10ms.
 * The retry counter restarts whenever either pointer moves, so -EBUSY
 * is only returned when the hardware makes no progress at all for the
 * whole retry budget.  Returns 0 once @n bytes are available.
 *
 * @caller is accepted for debugging purposes but not used in the body.
 */
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
        /* 965-class hardware moved the active-head register. */
        u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
        u32 last_acthd = I915_READ(acthd_reg);
        u32 acthd;
        u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
        int i;

        for (i = 0; i < 100000; i++) {
                ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
                acthd = I915_READ(acthd_reg);
                /* Free space is head - tail minus an 8-byte guard so an
                 * empty ring (head == tail) stays distinguishable from a
                 * completely full one. */
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->Size;
                if (ring->space >= n)
                        return 0;

                if (dev->primary->master) {
                        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
                        if (master_priv->sarea_priv)
                                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
                }


                /* Any movement of either pointer counts as progress:
                 * restart the retry budget. */
                if (ring->head != last_head)
                        i = 0;
                if (acthd != last_acthd)
                        i = 0;

                last_head = ring->head;
                last_acthd = acthd;
                /* NOTE(review): the return value is ignored — a pending
                 * signal shortens the sleep but the loop keeps polling. */
                msleep_interruptible(10);

        }

        return -EBUSY;
}
80
/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        /* Program Hardware Status Page */
        /* One page of DMA-coherent memory, constrained below 4GB
         * (0xffffffff mask) so the 32-bit register can address it. */
        dev_priv->status_page_dmah =
                drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);

        if (!dev_priv->status_page_dmah) {
                DRM_ERROR("Can not allocate hardware status page\n");
                return -ENOMEM;
        }
        /* Keep both the CPU mapping and the bus address around. */
        dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
        dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

        /* Point the hardware at the page's bus address. */
        I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
        DRM_DEBUG("Enabled hardware status page\n");
        return 0;
}
105
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        /* Physically addressed HWS (allocated in i915_init_phys_hws). */
        if (dev_priv->status_page_dmah) {
                drm_pci_free(dev, dev_priv->status_page_dmah);
                dev_priv->status_page_dmah = NULL;
        }

        /* GFX-addressed HWS (mapped in i915_set_status_page). */
        if (dev_priv->status_gfx_addr) {
                dev_priv->status_gfx_addr = 0;
                drm_core_ioremapfree(&dev_priv->hws_map, dev);
        }

        /* Need to rewrite hardware status page */
        I915_WRITE(HWS_PGA, 0x1ffff000);
}
126
/**
 * Re-read the software ring-buffer bookkeeping (head/tail/space) from the
 * hardware registers after userspace may have touched the ring behind the
 * kernel's back, and flag an empty ring in the sarea perf boxes.
 */
void i915_kernel_lost_context(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
        drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

        /*
         * We should never lose context on the ring with modesetting
         * as we don't expose it to userspace
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
        ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
        /* 8-byte guard keeps head == tail unambiguous (empty vs full). */
        ring->space = ring->head - (ring->tail + 8);
        if (ring->space < 0)
                ring->space += ring->Size;

        if (!dev->primary->master)
                return;

        master_priv = dev->primary->master->driver_priv;
        if (ring->head == ring->tail && master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
153
/**
 * Tear down DMA state: disable interrupts, unmap the ring buffer and
 * release the hardware status page.  Safe to call more than once.
 */
static int i915_dma_cleanup(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        if (dev_priv->ring.virtual_start) {
                drm_core_ioremapfree(&dev_priv->ring.map, dev);
                dev_priv->ring.virtual_start = NULL;
                dev_priv->ring.map.handle = NULL;
                dev_priv->ring.map.size = 0;
        }

        /* Clear the HWS virtual address at teardown */
        if (I915_NEED_GFX_HWS(dev))
                i915_free_hws(dev);

        return 0;
}
177
/**
 * Handle I915_INIT_DMA: locate the sarea, optionally map the userspace-
 * provided ring buffer, and record the framebuffer layout parameters
 * (cpp, front/back offsets) passed in by the DDX.
 *
 * Returns 0 on success, -EINVAL if a ring already exists in GEM mode,
 * -ENOMEM if the ring cannot be ioremapped.
 */
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

        master_priv->sarea = drm_getsarea(dev);
        if (master_priv->sarea) {
                master_priv->sarea_priv = (drm_i915_sarea_t *)
                        ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
        } else {
                /* DRI2 clients have no sarea; that's not an error. */
                DRM_DEBUG("sarea not found assuming DRI2 userspace\n");
        }

        /* ring_size == 0 means the ring was set up elsewhere (GEM). */
        if (init->ring_size != 0) {
                if (dev_priv->ring.ring_obj != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
                        return -EINVAL;
                }

                /* tail_mask assumes the ring size is a power of two. */
                dev_priv->ring.Size = init->ring_size;
                dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

                dev_priv->ring.map.offset = init->ring_start;
                dev_priv->ring.map.size = init->ring_size;
                dev_priv->ring.map.type = 0;
                dev_priv->ring.map.flags = 0;
                dev_priv->ring.map.mtrr = 0;

                /* Write-combined mapping of the ring aperture. */
                drm_core_ioremap_wc(&dev_priv->ring.map, dev);

                if (dev_priv->ring.map.handle == NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("can not ioremap virtual address for"
                                  " ring buffer\n");
                        return -ENOMEM;
                }
        }

        dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

        dev_priv->cpp = init->cpp;
        dev_priv->back_offset = init->back_offset;
        dev_priv->front_offset = init->front_offset;
        dev_priv->current_page = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->pf_current_page = 0;

        /* Allow hardware batchbuffers unless told otherwise.
         */
        dev_priv->allow_batchbuffer = 1;

        return 0;
}
233
/**
 * Handle I915_RESUME_DMA: verify the ring mapping and status page still
 * exist and re-program the HWS_PGA register (the hardware register is
 * lost across suspend; the software state is not).
 */
static int i915_dma_resume(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        DRM_DEBUG("%s\n", __func__);

        if (dev_priv->ring.map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }

        /* Program Hardware Status Page */
        if (!dev_priv->hw_status_page) {
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
        DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

        /* GFX-addressed HWS takes precedence over the physical one. */
        if (dev_priv->status_gfx_addr != 0)
                I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
        else
                I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
        DRM_DEBUG("Enabled hardware status page\n");

        return 0;
}
261
262 static int i915_dma_init(struct drm_device *dev, void *data,
263                          struct drm_file *file_priv)
264 {
265         drm_i915_init_t *init = data;
266         int retcode = 0;
267
268         switch (init->func) {
269         case I915_INIT_DMA:
270                 retcode = i915_initialize(dev, init);
271                 break;
272         case I915_CLEANUP_DMA:
273                 retcode = i915_dma_cleanup(dev);
274                 break;
275         case I915_RESUME_DMA:
276                 retcode = i915_dma_resume(dev);
277                 break;
278         default:
279                 retcode = -EINVAL;
280                 break;
281         }
282
283         return retcode;
284 }
285
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
        int client = (cmd >> 29) & 0x7; /* top 3 bits select the client */
        int subop;

        switch (client) {
        case 0x0:
                /* MI client: only NOOP and FLUSH are allowed. */
                subop = (cmd >> 23) & 0x3f;
                if (subop == 0x0)
                        return 1;       /* MI_NOOP */
                if (subop == 0x4)
                        return 1;       /* MI_FLUSH */
                return 0;               /* disallow everything else */
        case 0x1:
                return 0;               /* reserved */
        case 0x2:
                return (cmd & 0xff) + 2;        /* 2d commands */
        case 0x3:
                subop = (cmd >> 24) & 0x1f;
                if (subop <= 0x18)
                        return 1;       /* single-dword 3D state */
                switch (subop) {
                case 0x1c:
                        return 1;
                case 0x1d:
                        /* Length field width depends on the sub-opcode. */
                        switch ((cmd >> 16) & 0xff) {
                        case 0x3:
                                return (cmd & 0x1f) + 2;
                        case 0x4:
                                return (cmd & 0xf) + 2;
                        default:
                                return (cmd & 0xffff) + 2;
                        }
                case 0x1e:
                        return (cmd & (1 << 23)) ? (cmd & 0xffff) + 1 : 1;
                case 0x1f:
                        if ((cmd & (1 << 23)) == 0)     /* inline vertices */
                                return (cmd & 0x1ffff) + 2;
                        if (cmd & (1 << 17)) {          /* indirect random */
                                if ((cmd & 0xffff) == 0)
                                        return 0;       /* unknown length, too hard */
                                return (((cmd & 0xffff) + 1) / 2) + 1;
                        }
                        return 2;       /* indirect sequential */
                default:
                        return 0;
                }
        default:
                return 0;
        }
}
352
/* Thin wrapper around do_validate_cmd(); kept as a separate hook so a
 * per-command debug trace can be added here without touching the
 * validator itself. */
static int validate_cmd(int cmd)
{
        return do_validate_cmd(cmd);
}
361
/**
 * Validate and copy @dwords dwords of user-supplied commands from
 * @buffer into the ring.  Returns 0 on success, -EINVAL if the buffer
 * cannot fit or contains a disallowed/overrunning instruction.
 */
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
        RING_LOCALS;

        /* Reject buffers that could never fit (8-byte ring guard). */
        if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
                return -EINVAL;

        /* Ring space is reserved in qword-aligned chunks; round up. */
        BEGIN_LP_RING((dwords+1)&~1);

        for (i = 0; i < dwords;) {
                int cmd, sz;

                cmd = buffer[i];

                /* sz == 0 flags an illegal instruction; also reject a
                 * command whose operands would run past the buffer end.
                 * NOTE(review): returning here leaves the space reserved
                 * by BEGIN_LP_RING unadvanced — presumably callers treat
                 * -EINVAL as fatal; confirm before relying on recovery. */
                if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
                        return -EINVAL;

                OUT_RING(cmd);

                /* Emit the command's remaining operand dwords verbatim. */
                while (++i, --sz) {
                        OUT_RING(buffer[i]);
                }
        }

        /* Pad odd-length buffers to keep the tail qword-aligned. */
        if (dwords & 1)
                OUT_RING(0);

        ADVANCE_LP_RING();

        return 0;
}
395
/**
 * Emit a DRAWRECT (clip rectangle) command for boxes[i] into the ring.
 *
 * @boxes: array of clip rectangles from userspace
 * @i: index of the rectangle to emit
 * @DR1, @DR4: draw-rectangle control dwords (DR1 unused on 965)
 *
 * Returns 0 on success, -EINVAL for a degenerate or negative box.
 */
int
i915_emit_box(struct drm_device *dev,
              struct drm_clip_rect *boxes,
              int i, int DR1, int DR4)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_clip_rect box = boxes[i];
        RING_LOCALS;

        /* Reject empty/inverted rectangles and wholly negative ones. */
        if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
                          box.x1, box.y1, box.x2, box.y2);
                return -EINVAL;
        }

        if (IS_I965G(dev)) {
                /* 965 uses a shorter, DR1-less drawrect packet. */
                BEGIN_LP_RING(4);
                OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
                OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
                OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
                OUT_RING(DR4);
                ADVANCE_LP_RING();
        } else {
                BEGIN_LP_RING(6);
                OUT_RING(GFX_OP_DRAWRECT_INFO);
                OUT_RING(DR1);
                OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
                OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
                OUT_RING(DR4);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }

        return 0;
}
431
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

/**
 * Bump the software frame counter, mirror it into the sarea, and emit a
 * MI_STORE_DWORD_INDEX so the hardware writes the counter into the
 * breadcrumb slot of the status page when it reaches this point.
 */
static void i915_emit_breadcrumb(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        RING_LOCALS;

        /* Wrap before the counter goes negative as a signed 32-bit value. */
        dev_priv->counter++;
        if (dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->counter;

        BEGIN_LP_RING(4);
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(dev_priv->counter);
        OUT_RING(0);
        ADVANCE_LP_RING();
}
455
/**
 * Dispatch a validated command buffer, once per clip rectangle (or once
 * with no clipping when @cliprects is empty), then emit a breadcrumb.
 *
 * @cmdbuf points at the kernel copy of the commands; @cmd carries its
 * size and the draw-rectangle dwords.  Returns 0 or a negative errno.
 */
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
                                   drm_i915_cmdbuffer_t *cmd,
                                   struct drm_clip_rect *cliprects,
                                   void *cmdbuf)
{
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;

        /* Command buffers must be a whole number of dwords. */
        if (cmd->sz & 0x3) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        /* With no cliprects, execute the buffer exactly once. */
        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, cliprects, i,
                                            cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }

                ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
                if (ret)
                        return ret;
        }

        i915_emit_breadcrumb(dev);
        return 0;
}
489
/**
 * Dispatch a non-secure batch buffer located at @batch->start, once per
 * clip rectangle (or once when there are none), then emit a breadcrumb.
 * Returns 0 or a negative errno from i915_emit_box().
 */
static int i915_dispatch_batchbuffer(struct drm_device * dev,
                                     drm_i915_batchbuffer_t * batch,
                                     struct drm_clip_rect *cliprects)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int nbox = batch->num_cliprects;
        int i = 0, count;
        RING_LOCALS;

        /* Batch start and length must be qword-aligned. */
        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        /* With no cliprects, execute the batch exactly once. */
        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        int ret = i915_emit_box(dev, cliprects, i,
                                                batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
                }

                if (!IS_I830(dev) && !IS_845G(dev)) {
                        /* Chips with MI_BATCH_BUFFER_START; the non-secure
                         * bit lives in the head dword on 965, in the
                         * address dword otherwise. */
                        BEGIN_LP_RING(2);
                        if (IS_I965G(dev)) {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
                                OUT_RING(batch->start);
                        } else {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                                OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        }
                        ADVANCE_LP_RING();
                } else {
                        /* 830/845 use the older MI_BATCH_BUFFER packet
                         * with explicit start and end addresses. */
                        BEGIN_LP_RING(4);
                        OUT_RING(MI_BATCH_BUFFER);
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        OUT_RING(batch->start + batch->used - 4);
                        OUT_RING(0);
                        ADVANCE_LP_RING();
                }
        }

        i915_emit_breadcrumb(dev);

        return 0;
}
540
/**
 * Emit an asynchronous front/back page flip: flush, program the display
 * base to the other buffer, wait for the flip to complete, then store a
 * breadcrumb and mirror the new current page into the sarea.
 *
 * Requires a sarea (-EINVAL without one, as DRI1-only functionality).
 */
static int i915_dispatch_flip(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv =
                dev->primary->master->driver_priv;
        RING_LOCALS;

        if (!master_priv->sarea_priv)
                return -EINVAL;

        DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
                  __func__,
                  dev_priv->current_page,
                  master_priv->sarea_priv->pf_current_page);

        i915_kernel_lost_context(dev);

        /* Flush outstanding rendering before touching the display base. */
        BEGIN_LP_RING(2);
        OUT_RING(MI_FLUSH | MI_READ_FLUSH);
        OUT_RING(0);
        ADVANCE_LP_RING();

        /* Switch the display to the other buffer and toggle the
         * software notion of which page is current. */
        BEGIN_LP_RING(6);
        OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
        OUT_RING(0);
        if (dev_priv->current_page == 0) {
                OUT_RING(dev_priv->back_offset);
                dev_priv->current_page = 1;
        } else {
                OUT_RING(dev_priv->front_offset);
                dev_priv->current_page = 0;
        }
        OUT_RING(0);
        ADVANCE_LP_RING();

        /* Stall the ring until the flip has actually happened. */
        BEGIN_LP_RING(2);
        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
        OUT_RING(0);
        ADVANCE_LP_RING();

        master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

        /* Store the new counter into the status-page breadcrumb slot. */
        BEGIN_LP_RING(4);
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(dev_priv->counter);
        OUT_RING(0);
        ADVANCE_LP_RING();

        master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
        return 0;
}
593
/* Wait for the ring to drain completely — i.e. everything but the
 * 8-byte guard is free.  Returns 0 or -EBUSY from i915_wait_ring(). */
static int i915_quiescent(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        i915_kernel_lost_context(dev);
        return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
}
601
602 static int i915_flush_ioctl(struct drm_device *dev, void *data,
603                             struct drm_file *file_priv)
604 {
605         int ret;
606
607         RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
608
609         mutex_lock(&dev->struct_mutex);
610         ret = i915_quiescent(dev);
611         mutex_unlock(&dev->struct_mutex);
612
613         return ret;
614 }
615
616 static int i915_batchbuffer(struct drm_device *dev, void *data,
617                             struct drm_file *file_priv)
618 {
619         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
620         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
621         drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
622             master_priv->sarea_priv;
623         drm_i915_batchbuffer_t *batch = data;
624         int ret;
625         struct drm_clip_rect *cliprects = NULL;
626
627         if (!dev_priv->allow_batchbuffer) {
628                 DRM_ERROR("Batchbuffer ioctl disabled\n");
629                 return -EINVAL;
630         }
631
632         DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
633                   batch->start, batch->used, batch->num_cliprects);
634
635         RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
636
637         if (batch->num_cliprects < 0)
638                 return -EINVAL;
639
640         if (batch->num_cliprects) {
641                 cliprects = drm_calloc(batch->num_cliprects,
642                                        sizeof(struct drm_clip_rect),
643                                        DRM_MEM_DRIVER);
644                 if (cliprects == NULL)
645                         return -ENOMEM;
646
647                 ret = copy_from_user(cliprects, batch->cliprects,
648                                      batch->num_cliprects *
649                                      sizeof(struct drm_clip_rect));
650                 if (ret != 0)
651                         goto fail_free;
652         }
653
654         mutex_lock(&dev->struct_mutex);
655         ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
656         mutex_unlock(&dev->struct_mutex);
657
658         if (sarea_priv)
659                 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
660
661 fail_free:
662         drm_free(cliprects,
663                  batch->num_cliprects * sizeof(struct drm_clip_rect),
664                  DRM_MEM_DRIVER);
665
666         return ret;
667 }
668
669 static int i915_cmdbuffer(struct drm_device *dev, void *data,
670                           struct drm_file *file_priv)
671 {
672         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
673         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
674         drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
675             master_priv->sarea_priv;
676         drm_i915_cmdbuffer_t *cmdbuf = data;
677         struct drm_clip_rect *cliprects = NULL;
678         void *batch_data;
679         int ret;
680
681         DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
682                   cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
683
684         RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
685
686         if (cmdbuf->num_cliprects < 0)
687                 return -EINVAL;
688
689         batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER);
690         if (batch_data == NULL)
691                 return -ENOMEM;
692
693         ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
694         if (ret != 0)
695                 goto fail_batch_free;
696
697         if (cmdbuf->num_cliprects) {
698                 cliprects = drm_calloc(cmdbuf->num_cliprects,
699                                        sizeof(struct drm_clip_rect),
700                                        DRM_MEM_DRIVER);
701                 if (cliprects == NULL)
702                         goto fail_batch_free;
703
704                 ret = copy_from_user(cliprects, cmdbuf->cliprects,
705                                      cmdbuf->num_cliprects *
706                                      sizeof(struct drm_clip_rect));
707                 if (ret != 0)
708                         goto fail_clip_free;
709         }
710
711         mutex_lock(&dev->struct_mutex);
712         ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
713         mutex_unlock(&dev->struct_mutex);
714         if (ret) {
715                 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
716                 goto fail_batch_free;
717         }
718
719         if (sarea_priv)
720                 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
721
722 fail_batch_free:
723         drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
724 fail_clip_free:
725         drm_free(cliprects,
726                  cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
727                  DRM_MEM_DRIVER);
728
729         return ret;
730 }
731
732 static int i915_flip_bufs(struct drm_device *dev, void *data,
733                           struct drm_file *file_priv)
734 {
735         int ret;
736
737         DRM_DEBUG("%s\n", __func__);
738
739         RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
740
741         mutex_lock(&dev->struct_mutex);
742         ret = i915_dispatch_flip(dev);
743         mutex_unlock(&dev->struct_mutex);
744
745         return ret;
746 }
747
748 static int i915_getparam(struct drm_device *dev, void *data,
749                          struct drm_file *file_priv)
750 {
751         drm_i915_private_t *dev_priv = dev->dev_private;
752         drm_i915_getparam_t *param = data;
753         int value;
754
755         if (!dev_priv) {
756                 DRM_ERROR("called with no initialization\n");
757                 return -EINVAL;
758         }
759
760         switch (param->param) {
761         case I915_PARAM_IRQ_ACTIVE:
762                 value = dev->pdev->irq ? 1 : 0;
763                 break;
764         case I915_PARAM_ALLOW_BATCHBUFFER:
765                 value = dev_priv->allow_batchbuffer ? 1 : 0;
766                 break;
767         case I915_PARAM_LAST_DISPATCH:
768                 value = READ_BREADCRUMB(dev_priv);
769                 break;
770         case I915_PARAM_CHIPSET_ID:
771                 value = dev->pci_device;
772                 break;
773         case I915_PARAM_HAS_GEM:
774                 value = dev_priv->has_gem;
775                 break;
776         case I915_PARAM_NUM_FENCES_AVAIL:
777                 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
778                 break;
779         default:
780                 DRM_DEBUG("Unknown parameter %d\n", param->param);
781                 return -EINVAL;
782         }
783
784         if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
785                 DRM_ERROR("DRM_COPY_TO_USER failed\n");
786                 return -EFAULT;
787         }
788
789         return 0;
790 }
791
792 static int i915_setparam(struct drm_device *dev, void *data,
793                          struct drm_file *file_priv)
794 {
795         drm_i915_private_t *dev_priv = dev->dev_private;
796         drm_i915_setparam_t *param = data;
797
798         if (!dev_priv) {
799                 DRM_ERROR("called with no initialization\n");
800                 return -EINVAL;
801         }
802
803         switch (param->param) {
804         case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
805                 break;
806         case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
807                 dev_priv->tex_lru_log_granularity = param->value;
808                 break;
809         case I915_SETPARAM_ALLOW_BATCHBUFFER:
810                 dev_priv->allow_batchbuffer = param->value;
811                 break;
812         case I915_SETPARAM_NUM_USED_FENCES:
813                 if (param->value > dev_priv->num_fence_regs ||
814                     param->value < 0)
815                         return -EINVAL;
816                 /* Userspace can use first N regs */
817                 dev_priv->fence_reg_start = param->value;
818                 break;
819         default:
820                 DRM_DEBUG("unknown parameter %d\n", param->param);
821                 return -EINVAL;
822         }
823
824         return 0;
825 }
826
/**
 * DRM_I915_HWS_ADDR ioctl: point the hardware status page at a location
 * in the GFX aperture chosen by userspace (chips that need a GFX-space
 * HWS, e.g. G33-class), mapping it into the kernel as well.
 *
 * Returns 0 on success, -EINVAL if the chip uses a physical HWS or the
 * driver is uninitialized, -ENOMEM if the page cannot be mapped.
 */
static int i915_set_status_page(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_hws_addr_t *hws = data;

        if (!I915_NEED_GFX_HWS(dev))
                return -EINVAL;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        /* With KMS the kernel owns the status page; refuse quietly. */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                WARN(1, "tried to set status page when mode setting active\n");
                return 0;
        }

        printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr);

        /* Keep only the page-aligned GFX address bits. */
        dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

        dev_priv->hws_map.offset = dev->agp->base + hws->addr;
        dev_priv->hws_map.size = 4*1024;
        dev_priv->hws_map.type = 0;
        dev_priv->hws_map.flags = 0;
        dev_priv->hws_map.mtrr = 0;

        drm_core_ioremap_wc(&dev_priv->hws_map, dev);
        if (dev_priv->hws_map.handle == NULL) {
                i915_dma_cleanup(dev);
                dev_priv->status_gfx_addr = 0;
                DRM_ERROR("can not ioremap virtual address for"
                                " G33 hw status page\n");
                return -ENOMEM;
        }
        dev_priv->hw_status_page = dev_priv->hws_map.handle;

        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
        I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
        DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
                        dev_priv->status_gfx_addr);
        DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
        return 0;
}
873
874 /**
875  * i915_probe_agp - get AGP bootup configuration
 * @dev: DRM device
877  * @aperture_size: returns AGP aperture configured size
878  * @preallocated_size: returns size of BIOS preallocated AGP space
879  *
880  * Since Intel integrated graphics are UMA, the BIOS has to set aside
881  * some RAM for the framebuffer at early boot.  This code figures out
882  * how much was set aside so we can use it for our own purposes.
883  */
884 static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size,
885                           unsigned long *preallocated_size)
886 {
887         struct pci_dev *bridge_dev;
888         u16 tmp = 0;
889         unsigned long overhead;
890         unsigned long stolen;
891
892         bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
893         if (!bridge_dev) {
894                 DRM_ERROR("bridge device not found\n");
895                 return -1;
896         }
897
898         /* Get the fb aperture size and "stolen" memory amount. */
899         pci_read_config_word(bridge_dev, INTEL_GMCH_CTRL, &tmp);
900         pci_dev_put(bridge_dev);
901
902         *aperture_size = 1024 * 1024;
903         *preallocated_size = 1024 * 1024;
904
905         switch (dev->pdev->device) {
906         case PCI_DEVICE_ID_INTEL_82830_CGC:
907         case PCI_DEVICE_ID_INTEL_82845G_IG:
908         case PCI_DEVICE_ID_INTEL_82855GM_IG:
909         case PCI_DEVICE_ID_INTEL_82865_IG:
910                 if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
911                         *aperture_size *= 64;
912                 else
913                         *aperture_size *= 128;
914                 break;
915         default:
916                 /* 9xx supports large sizes, just look at the length */
917                 *aperture_size = pci_resource_len(dev->pdev, 2);
918                 break;
919         }
920
921         /*
922          * Some of the preallocated space is taken by the GTT
923          * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
924          */
925         if (IS_G4X(dev) || IS_IGD(dev))
926                 overhead = 4096;
927         else
928                 overhead = (*aperture_size / 1024) + 4096;
929
930         switch (tmp & INTEL_GMCH_GMS_MASK) {
931         case INTEL_855_GMCH_GMS_DISABLED:
932                 DRM_ERROR("video memory is disabled\n");
933                 return -1;
934         case INTEL_855_GMCH_GMS_STOLEN_1M:
935                 stolen = 1 * 1024 * 1024;
936                 break;
937         case INTEL_855_GMCH_GMS_STOLEN_4M:
938                 stolen = 4 * 1024 * 1024;
939                 break;
940         case INTEL_855_GMCH_GMS_STOLEN_8M:
941                 stolen = 8 * 1024 * 1024;
942                 break;
943         case INTEL_855_GMCH_GMS_STOLEN_16M:
944                 stolen = 16 * 1024 * 1024;
945                 break;
946         case INTEL_855_GMCH_GMS_STOLEN_32M:
947                 stolen = 32 * 1024 * 1024;
948                 break;
949         case INTEL_915G_GMCH_GMS_STOLEN_48M:
950                 stolen = 48 * 1024 * 1024;
951                 break;
952         case INTEL_915G_GMCH_GMS_STOLEN_64M:
953                 stolen = 64 * 1024 * 1024;
954                 break;
955         case INTEL_GMCH_GMS_STOLEN_128M:
956                 stolen = 128 * 1024 * 1024;
957                 break;
958         case INTEL_GMCH_GMS_STOLEN_256M:
959                 stolen = 256 * 1024 * 1024;
960                 break;
961         case INTEL_GMCH_GMS_STOLEN_96M:
962                 stolen = 96 * 1024 * 1024;
963                 break;
964         case INTEL_GMCH_GMS_STOLEN_160M:
965                 stolen = 160 * 1024 * 1024;
966                 break;
967         case INTEL_GMCH_GMS_STOLEN_224M:
968                 stolen = 224 * 1024 * 1024;
969                 break;
970         case INTEL_GMCH_GMS_STOLEN_352M:
971                 stolen = 352 * 1024 * 1024;
972                 break;
973         default:
974                 DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
975                         tmp & INTEL_GMCH_GMS_MASK);
976                 return -1;
977         }
978         *preallocated_size = stolen - overhead;
979
980         return 0;
981 }
982
983 static int i915_load_modeset_init(struct drm_device *dev)
984 {
985         struct drm_i915_private *dev_priv = dev->dev_private;
986         unsigned long agp_size, prealloc_size;
987         int fb_bar = IS_I9XX(dev) ? 2 : 0;
988         int ret = 0;
989
990         dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL);
991         if (!dev->devname) {
992                 ret = -ENOMEM;
993                 goto out;
994         }
995
996         dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
997                 0xff000000;
998
999         if (IS_MOBILE(dev) || IS_I9XX(dev))
1000                 dev_priv->cursor_needs_physical = true;
1001         else
1002                 dev_priv->cursor_needs_physical = false;
1003
1004         if (IS_I965G(dev) || IS_G33(dev))
1005                 dev_priv->cursor_needs_physical = false;
1006
1007         ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
1008         if (ret)
1009                 goto kfree_devname;
1010
1011         /* Basic memrange allocator for stolen space (aka vram) */
1012         drm_mm_init(&dev_priv->vram, 0, prealloc_size);
1013
1014         /* Let GEM Manage from end of prealloc space to end of aperture */
1015         i915_gem_do_init(dev, prealloc_size, agp_size);
1016
1017         ret = i915_gem_init_ringbuffer(dev);
1018         if (ret)
1019                 goto kfree_devname;
1020
1021         /* Allow hardware batchbuffers unless told otherwise.
1022          */
1023         dev_priv->allow_batchbuffer = 1;
1024
1025         ret = intel_init_bios(dev);
1026         if (ret)
1027                 DRM_INFO("failed to find VBIOS tables\n");
1028
1029         ret = drm_irq_install(dev);
1030         if (ret)
1031                 goto destroy_ringbuffer;
1032
1033         /* Always safe in the mode setting case. */
1034         /* FIXME: do pre/post-mode set stuff in core KMS code */
1035         dev->vblank_disable_allowed = 1;
1036
1037         /*
1038          * Initialize the hardware status page IRQ location.
1039          */
1040
1041         I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
1042
1043         intel_modeset_init(dev);
1044
1045         drm_helper_initial_config(dev);
1046
1047         return 0;
1048
1049 destroy_ringbuffer:
1050         i915_gem_cleanup_ringbuffer(dev);
1051 kfree_devname:
1052         kfree(dev->devname);
1053 out:
1054         return ret;
1055 }
1056
1057 int i915_master_create(struct drm_device *dev, struct drm_master *master)
1058 {
1059         struct drm_i915_master_private *master_priv;
1060
1061         master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
1062         if (!master_priv)
1063                 return -ENOMEM;
1064
1065         master->driver_priv = master_priv;
1066         return 0;
1067 }
1068
1069 void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1070 {
1071         struct drm_i915_master_private *master_priv = master->driver_priv;
1072
1073         if (!master_priv)
1074                 return;
1075
1076         drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
1077
1078         master->driver_priv = NULL;
1079 }
1080
1081 /**
1082  * i915_driver_load - setup chip and create an initial config
1083  * @dev: DRM device
1084  * @flags: startup flags
1085  *
1086  * The driver load routine has to do several things:
1087  *   - drive output discovery via intel_modeset_init()
1088  *   - initialize the memory manager
1089  *   - allocate initial config memory
1090  *   - setup the DRM framebuffer with the allocated memory
1091  */
1092 int i915_driver_load(struct drm_device *dev, unsigned long flags)
1093 {
1094         struct drm_i915_private *dev_priv = dev->dev_private;
1095         resource_size_t base, size;
1096         int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
1097
1098         /* i915 has 4 more counters */
1099         dev->counters += 4;
1100         dev->types[6] = _DRM_STAT_IRQ;
1101         dev->types[7] = _DRM_STAT_PRIMARY;
1102         dev->types[8] = _DRM_STAT_SECONDARY;
1103         dev->types[9] = _DRM_STAT_DMA;
1104
1105         dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
1106         if (dev_priv == NULL)
1107                 return -ENOMEM;
1108
1109         memset(dev_priv, 0, sizeof(drm_i915_private_t));
1110
1111         dev->dev_private = (void *)dev_priv;
1112         dev_priv->dev = dev;
1113
1114         /* Add register map (needed for suspend/resume) */
1115         base = drm_get_resource_start(dev, mmio_bar);
1116         size = drm_get_resource_len(dev, mmio_bar);
1117
1118         dev_priv->regs = ioremap(base, size);
1119         if (!dev_priv->regs) {
1120                 DRM_ERROR("failed to map registers\n");
1121                 ret = -EIO;
1122                 goto free_priv;
1123         }
1124
1125         dev_priv->mm.gtt_mapping =
1126                 io_mapping_create_wc(dev->agp->base,
1127                                      dev->agp->agp_info.aper_size * 1024*1024);
1128         if (dev_priv->mm.gtt_mapping == NULL) {
1129                 ret = -EIO;
1130                 goto out_rmmap;
1131         }
1132
1133         /* Set up a WC MTRR for non-PAT systems.  This is more common than
1134          * one would think, because the kernel disables PAT on first
1135          * generation Core chips because WC PAT gets overridden by a UC
1136          * MTRR if present.  Even if a UC MTRR isn't present.
1137          */
1138         dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
1139                                          dev->agp->agp_info.aper_size *
1140                                          1024 * 1024,
1141                                          MTRR_TYPE_WRCOMB, 1);
1142         if (dev_priv->mm.gtt_mtrr < 0) {
1143                 DRM_INFO("MTRR allocation failed.  Graphics "
1144                          "performance may suffer.\n");
1145         }
1146
1147 #ifdef CONFIG_HIGHMEM64G
1148         /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
1149         dev_priv->has_gem = 0;
1150 #else
1151         /* enable GEM by default */
1152         dev_priv->has_gem = 1;
1153 #endif
1154
1155         dev->driver->get_vblank_counter = i915_get_vblank_counter;
1156         if (IS_GM45(dev))
1157                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
1158
1159         i915_gem_load(dev);
1160
1161         /* Init HWS */
1162         if (!I915_NEED_GFX_HWS(dev)) {
1163                 ret = i915_init_phys_hws(dev);
1164                 if (ret != 0)
1165                         goto out_iomapfree;
1166         }
1167
1168         /* On the 945G/GM, the chipset reports the MSI capability on the
1169          * integrated graphics even though the support isn't actually there
1170          * according to the published specs.  It doesn't appear to function
1171          * correctly in testing on 945G.
1172          * This may be a side effect of MSI having been made available for PEG
1173          * and the registers being closely associated.
1174          *
1175          * According to chipset errata, on the 965GM, MSI interrupts may
1176          * be lost or delayed, but we use them anyways to avoid
1177          * stuck interrupts on some machines.
1178          */
1179         if (!IS_I945G(dev) && !IS_I945GM(dev))
1180                 pci_enable_msi(dev->pdev);
1181
1182         spin_lock_init(&dev_priv->user_irq_lock);
1183         dev_priv->user_irq_refcount = 0;
1184
1185         ret = drm_vblank_init(dev, I915_NUM_PIPE);
1186
1187         if (ret) {
1188                 (void) i915_driver_unload(dev);
1189                 return ret;
1190         }
1191
1192         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1193                 ret = i915_load_modeset_init(dev);
1194                 if (ret < 0) {
1195                         DRM_ERROR("failed to init modeset\n");
1196                         goto out_rmmap;
1197                 }
1198         }
1199
1200         /* Must be done after probing outputs */
1201         intel_opregion_init(dev, 0);
1202
1203         return 0;
1204
1205 out_iomapfree:
1206         io_mapping_free(dev_priv->mm.gtt_mapping);
1207 out_rmmap:
1208         iounmap(dev_priv->regs);
1209 free_priv:
1210         drm_free(dev_priv, sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
1211         return ret;
1212 }
1213
/*
 * i915_driver_unload - undo i915_driver_load()
 * @dev: DRM device
 *
 * Tears down the GTT io mapping, WC MTRR, IRQ handler, MSI, MMIO
 * mapping, opregion, and (under KMS) mode setting / GEM state, then
 * frees the driver-private structure.  Always returns 0.
 */
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	io_mapping_free(dev_priv->mm.gtt_mapping);
	/* Only delete the MTRR if mtrr_add() succeeded at load time
	 * (gtt_mtrr is negative otherwise). */
	if (dev_priv->mm.gtt_mtrr >= 0) {
		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
			 dev->agp->agp_info.aper_size * 1024 * 1024);
		dev_priv->mm.gtt_mtrr = -1;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_irq_uninstall(dev);
	}

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	if (dev_priv->regs != NULL)
		iounmap(dev_priv->regs);

	intel_opregion_free(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_modeset_cleanup(dev);

		i915_gem_free_all_phys_object(dev);

		/* Ring buffer teardown must run under struct_mutex. */
		mutex_lock(&dev->struct_mutex);
		i915_gem_cleanup_ringbuffer(dev);
		mutex_unlock(&dev->struct_mutex);
		drm_mm_takedown(&dev_priv->vram);
		i915_gem_lastclose(dev);
	}

	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
		 DRM_MEM_DRIVER);

	return 0;
}
1254
1255 int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
1256 {
1257         struct drm_i915_file_private *i915_file_priv;
1258
1259         DRM_DEBUG("\n");
1260         i915_file_priv = (struct drm_i915_file_private *)
1261             drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
1262
1263         if (!i915_file_priv)
1264                 return -ENOMEM;
1265
1266         file_priv->driver_priv = i915_file_priv;
1267
1268         i915_file_priv->mm.last_gem_seqno = 0;
1269         i915_file_priv->mm.last_gem_throttle_seqno = 0;
1270
1271         return 0;
1272 }
1273
1274 /**
1275  * i915_driver_lastclose - clean up after all DRM clients have exited
1276  * @dev: DRM device
1277  *
1278  * Take care of cleaning up after all DRM clients have exited.  In the
1279  * mode setting case, we want to restore the kernel's initial mode (just
1280  * in case the last client left us in a bad state).
1281  *
1282  * Additionally, in the non-mode setting case, we'll tear down the AGP
 * and DMA structures, since the kernel won't be using them, and clean
1284  * up any GEM state.
1285  */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Under KMS (or if the driver never initialized), just restore
	 * the kernel's framebuffer console mode and keep all state. */
	if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
		intelfb_restore();
		return;
	}

	/* UMS path: drop GEM state, the AGP heap, and the DMA machinery. */
	i915_gem_lastclose(dev);

	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	i915_dma_cleanup(dev);
}
1302
1303 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1304 {
1305         drm_i915_private_t *dev_priv = dev->dev_private;
1306         if (!drm_core_check_feature(dev, DRIVER_MODESET))
1307                 i915_mem_release(dev, file_priv, dev_priv->agp_heap);
1308 }
1309
1310 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
1311 {
1312         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1313
1314         drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
1315 }
1316
/* Ioctl dispatch table for the i915 driver.  Legacy DMA/heap entries
 * require an authenticated client (DRM_AUTH) and in several cases the
 * DRM master and/or root; the GEM object ioctls (flags 0) are open to
 * any client. */
struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
};

/* Number of entries in i915_ioctls, consumed by the DRM core. */
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
1356
1357 /**
1358  * Determine if the device really is AGP or not.
1359  *
1360  * All Intel graphics chipsets are treated as AGP, even if they are really
1361  * PCI-e.
1362  *
1363  * \param dev   The device to be tested.
1364  *
1365  * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
1367  */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	/* All Intel integrated graphics parts are driven through the AGP
	 * path, so report AGP unconditionally regardless of @dev. */
	return 1;
}