drivers/gpu/drm/i915/i915_debugfs.c

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1


#if defined(CONFIG_DEBUG_FS)

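/*
 * Selectors passed via a debugfs entry's ->data field; i915_gem_object_list_info()
 * switches on them to decide which GEM object list to dump.
 */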
enum {
        RENDER_LIST,
        BSD_LIST,
        FLUSHING_LIST,
        INACTIVE_LIST,
        PINNED_LIST,
        DEFERRED_FREE_LIST,
};

static const char *yesno(int v)
{
        return v ? "yes" : "no";
}

static int i915_capabilities(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        const struct intel_device_info *info = INTEL_INFO(dev);

        seq_printf(m, "gen: %d\n", info->gen);
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
        B(is_mobile);
        B(is_i85x);
        B(is_i915g);
        B(is_i945gm);
        B(is_g33);
        B(need_gfx_hws);
        B(is_g4x);
        B(is_pineview);
        B(is_broadwater);
        B(is_crestline);
        B(is_ironlake);
        B(has_fbc);
        B(has_rc6);
        B(has_pipe_cxsr);
        B(has_hotplug);
        B(cursor_needs_physical);
        B(has_overlay);
        B(overlay_needs_physical);
        B(supports_tv);
#undef B

        return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
{
        if (obj_priv->user_pin_count > 0)
                return "P";
        else if (obj_priv->pin_count > 0)
                return "p";
        else
                return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
{
        switch (obj_priv->tiling_mode) {
        default:
        case I915_TILING_NONE: return " ";
        case I915_TILING_X: return "X";
        case I915_TILING_Y: return "Y";
        }
}

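/*
 * Print a one-line summary of a GEM object: pin and tiling flags, size,
 * read/write domains and last rendering seqno, plus its flink name, fence
 * register and GTT offset when present.
 */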
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
        seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
                   &obj->base,
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
                   obj->base.size,
                   obj->base.read_domains,
                   obj->base.write_domain,
                   obj->last_rendering_seqno,
                   obj->dirty ? " dirty" : "",
                   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
        if (obj->fence_reg != I915_FENCE_REG_NONE)
                seq_printf(m, " (fence: %d)", obj->fence_reg);
        if (obj->gtt_space != NULL)
                seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
}

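/*
 * Walk the GEM object list selected by ->data (see the enum above), describe
 * each object on it and finish with the object count and aggregate sizes.
 */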
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        uintptr_t list = (uintptr_t) node->info_ent->data;
        struct list_head *head;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
        size_t total_obj_size, total_gtt_size;
        int count, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        switch (list) {
        case RENDER_LIST:
                seq_printf(m, "Render:\n");
                head = &dev_priv->render_ring.active_list;
                break;
        case BSD_LIST:
                seq_printf(m, "BSD:\n");
                head = &dev_priv->bsd_ring.active_list;
                break;
        case INACTIVE_LIST:
                seq_printf(m, "Inactive:\n");
                head = &dev_priv->mm.inactive_list;
                break;
        case PINNED_LIST:
                seq_printf(m, "Pinned:\n");
                head = &dev_priv->mm.pinned_list;
                break;
        case FLUSHING_LIST:
                seq_printf(m, "Flushing:\n");
                head = &dev_priv->mm.flushing_list;
                break;
        case DEFERRED_FREE_LIST:
                seq_printf(m, "Deferred free:\n");
                head = &dev_priv->mm.deferred_free_list;
                break;
        default:
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        total_obj_size = total_gtt_size = count = 0;
        list_for_each_entry(obj_priv, head, list) {
                seq_printf(m, "   ");
                describe_obj(m, obj_priv);
                seq_printf(m, "\n");
                total_obj_size += obj_priv->base.size;
                total_gtt_size += obj_priv->gtt_space->size;
                count++;
        }
        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
                   count, total_obj_size, total_gtt_size);
        return 0;
}

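/*
 * For every CRTC, report whether a page flip is outstanding, whether it is
 * still queued or already waiting for vblank, and the GTT offsets of the old
 * and new framebuffer objects.
 */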
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        unsigned long flags;
        struct intel_crtc *crtc;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
                const char *pipe = crtc->pipe ? "B" : "A";
                const char *plane = crtc->plane ? "B" : "A";
                struct intel_unpin_work *work;

                spin_lock_irqsave(&dev->event_lock, flags);
                work = crtc->unpin_work;
                if (work == NULL) {
                        seq_printf(m, "No flip due on pipe %s (plane %s)\n",
                                   pipe, plane);
                } else {
                        if (!work->pending) {
                                seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
                                           pipe, plane);
                        } else {
                                seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
                                           pipe, plane);
                        }
                        if (work->enable_stall_check)
                                seq_printf(m, "Stall check enabled, ");
                        else
                                seq_printf(m, "Stall check waiting for page flip ioctl, ");
                        seq_printf(m, "%d prepares\n", work->pending);

                        if (work->old_fb_obj) {
                                struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
                                if (obj_priv)
                                        seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset);
                        }
                        if (work->pending_flip_obj) {
                                struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
                                if (obj_priv)
                                        seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset);
                        }
                }
                spin_unlock_irqrestore(&dev->event_lock, flags);
        }

        return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *gem_request;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "Request:\n");
        list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
                        list) {
                seq_printf(m, "    %d @ %d\n",
                           gem_request->seqno,
                           (int) (jiffies - gem_request->emitted_jiffies));
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (dev_priv->render_ring.status_page.page_addr != NULL) {
                seq_printf(m, "Current sequence: %d\n",
                           dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
        } else {
                seq_printf(m, "Current sequence: hws uninitialized\n");
        }
        seq_printf(m, "Waiter sequence:  %d\n",
                        dev_priv->mm.waiting_gem_seqno);
        seq_printf(m, "IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}


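/*
 * Dump the interrupt enable/identity/mask registers (split into north/south
 * display and graphics blocks on PCH platforms) along with the interrupt
 * count and the render ring's sequence-number bookkeeping.
 */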
static int i915_interrupt_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (!HAS_PCH_SPLIT(dev)) {
                seq_printf(m, "Interrupt enable:    %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity:  %08x\n",
                           I915_READ(IIR));
                seq_printf(m, "Interrupt mask:      %08x\n",
                           I915_READ(IMR));
                seq_printf(m, "Pipe A stat:         %08x\n",
                           I915_READ(PIPEASTAT));
                seq_printf(m, "Pipe B stat:         %08x\n",
                           I915_READ(PIPEBSTAT));
        } else {
                seq_printf(m, "North Display Interrupt enable:          %08x\n",
                           I915_READ(DEIER));
                seq_printf(m, "North Display Interrupt identity:        %08x\n",
                           I915_READ(DEIIR));
                seq_printf(m, "North Display Interrupt mask:            %08x\n",
                           I915_READ(DEIMR));
                seq_printf(m, "South Display Interrupt enable:          %08x\n",
                           I915_READ(SDEIER));
                seq_printf(m, "South Display Interrupt identity:        %08x\n",
                           I915_READ(SDEIIR));
                seq_printf(m, "South Display Interrupt mask:            %08x\n",
                           I915_READ(SDEIMR));
                seq_printf(m, "Graphics Interrupt enable:               %08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Graphics Interrupt identity:             %08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Graphics Interrupt mask:         %08x\n",
                           I915_READ(GTIMR));
        }
        seq_printf(m, "Interrupts received: %d\n",
                   atomic_read(&dev_priv->irq_received));
        if (dev_priv->render_ring.status_page.page_addr != NULL) {
                seq_printf(m, "Current sequence:    %d\n",
                           dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
        } else {
                seq_printf(m, "Current sequence:    hws uninitialized\n");
        }
        seq_printf(m, "Waiter sequence:     %d\n",
                   dev_priv->mm.waiting_gem_seqno);
        seq_printf(m, "IRQ sequence:        %d\n",
                   dev_priv->mm.irq_gem_seqno);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

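/*
 * List every hardware fence register: either "unused" or a summary of the
 * object currently bound to it, including offset, stride, tiling and domains.
 */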
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;

                if (obj == NULL) {
                        seq_printf(m, "Fenced object[%2d] = unused\n", i);
                } else {
                        struct drm_i915_gem_object *obj_priv;

                        obj_priv = to_intel_bo(obj);
                        seq_printf(m, "Fenced object[%2d] = %p: %s "
                                   "%08x %08zx %08x %s %08x %08x %d",
                                   i, obj, get_pin_flag(obj_priv),
                                   obj_priv->gtt_offset,
                                   obj->size, obj_priv->stride,
                                   get_tiling_flag(obj_priv),
                                   obj->read_domains, obj->write_domain,
                                   obj_priv->last_rendering_seqno);
                        if (obj->name)
                                seq_printf(m, " (name: %d)", obj->name);
                        seq_printf(m, "\n");
                }
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
        volatile u32 *hws;

        hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
        if (hws == NULL)
                return 0;

        for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
                seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                           i * 4,
                           hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
        }
        return 0;
}

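/* Hexdump an object's backing pages one 32-bit word per line; used below for
 * dumping batchbuffers. */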
static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
{
        int page, i;
        uint32_t *mem;

        for (page = 0; page < page_count; page++) {
                mem = kmap(pages[page]);
                for (i = 0; i < PAGE_SIZE; i += 4)
                        seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
                kunmap(pages[page]);
        }
}

static int i915_batchbuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
                        list) {
                obj = &obj_priv->base;
                if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
                    ret = i915_gem_object_get_pages(obj, 0);
                    if (ret) {
                            mutex_unlock(&dev->struct_mutex);
                            return ret;
                    }

                    seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
                    i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);

                    i915_gem_object_put_pages(obj);
                }
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

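/*
 * Render ring dumps: i915_ringbuffer_data prints the raw ring contents word
 * by word, while i915_ringbuffer_info reports the hardware head/tail
 * pointers, ring size and ACTHD.
 */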
static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (!dev_priv->render_ring.gem_object) {
                seq_printf(m, "No ringbuffer setup\n");
        } else {
                u8 *virt = dev_priv->render_ring.virtual_start;
                uint32_t off;

                for (off = 0; off < dev_priv->render_ring.size; off += 4) {
                        uint32_t *ptr = (uint32_t *)(virt + off);
                        seq_printf(m, "%08x :  %08x\n", off, *ptr);
                }
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_ringbuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned int head, tail;

        head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
        tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;

        seq_printf(m, "RingHead :  %08x\n", head);
        seq_printf(m, "RingTail :  %08x\n", tail);
        seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.size);
        seq_printf(m, "Acthd :     %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));

        return 0;
}

static const char *pin_flag(int pinned)
{
        if (pinned > 0)
                return " P";
        else if (pinned < 0)
                return " p";
        else
                return "";
}

static const char *tiling_flag(int tiling)
{
        switch (tiling) {
        default:
        case I915_TILING_NONE: return "";
        case I915_TILING_X: return " X";
        case I915_TILING_Y: return " Y";
        }
}

static const char *dirty_flag(int dirty)
{
        return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
        return purgeable ? " purgeable" : "";
}

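/*
 * Print the most recently captured GPU error state: error registers, the
 * active buffer list, hexdumps of the captured batchbuffers and ringbuffer,
 * and any overlay error state.
 */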
static int i915_error_state(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
        unsigned long flags;
        int i, page, offset, elt;

        spin_lock_irqsave(&dev_priv->error_lock, flags);
        if (!dev_priv->first_error) {
                seq_printf(m, "no error state collected\n");
                goto out;
        }

        error = dev_priv->first_error;

        seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
                   error->time.tv_usec);
        seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
        seq_printf(m, "EIR: 0x%08x\n", error->eir);
        seq_printf(m, "  PGTBL_ER: 0x%08x\n", error->pgtbl_er);
        seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
        seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir);
        seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
        seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
        seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
        if (INTEL_INFO(dev)->gen >= 4) {
                seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
                seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
        }
        seq_printf(m, "seqno: 0x%08x\n", error->seqno);

        if (error->active_bo_count) {
                seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);

                for (i = 0; i < error->active_bo_count; i++) {
                        seq_printf(m, "  %08x %8zd %08x %08x %08x%s%s%s%s",
                                   error->active_bo[i].gtt_offset,
                                   error->active_bo[i].size,
                                   error->active_bo[i].read_domains,
                                   error->active_bo[i].write_domain,
                                   error->active_bo[i].seqno,
                                   pin_flag(error->active_bo[i].pinned),
                                   tiling_flag(error->active_bo[i].tiling),
                                   dirty_flag(error->active_bo[i].dirty),
                                   purgeable_flag(error->active_bo[i].purgeable));

                        if (error->active_bo[i].name)
                                seq_printf(m, " (name: %d)", error->active_bo[i].name);
                        if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
                                seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);

                        seq_printf(m, "\n");
                }
        }

        for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
                if (error->batchbuffer[i]) {
                        struct drm_i915_error_object *obj = error->batchbuffer[i];

                        seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
                        offset = 0;
                        for (page = 0; page < obj->page_count; page++) {
                                for (elt = 0; elt < PAGE_SIZE/4; elt++) {
                                        seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
                                        offset += 4;
                                }
                        }
                }
        }

        if (error->ringbuffer) {
                struct drm_i915_error_object *obj = error->ringbuffer;

                seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
                offset = 0;
                for (page = 0; page < obj->page_count; page++) {
                        for (elt = 0; elt < PAGE_SIZE/4; elt++) {
                                seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
                                offset += 4;
                        }
                }
        }

        if (error->overlay)
                intel_overlay_print_error_state(m, error->overlay);

out:
        spin_unlock_irqrestore(&dev_priv->error_lock, flags);

        return 0;
}

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 crstanddelay = I915_READ16(CRSTANDVID);

        seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

        return 0;
}

static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 rgvswctl = I915_READ16(MEMSWCTL);
        u16 rgvstat = I915_READ16(MEMSTAT_ILK);

        seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
        seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
        seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                   MEMSTAT_VID_SHIFT);
        seq_printf(m, "Current P-state: %d\n",
                   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);

        return 0;
}

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 delayfreq;
        int i;

        for (i = 0; i < 16; i++) {
                delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
                seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
                           (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
        }

        return 0;
}

static inline int MAP_TO_MV(int map)
{
        return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 inttoext;
        int i;

        for (i = 1; i <= 32; i++) {
                inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
                seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
        }

        return 0;
}

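/*
 * Decode MEMMODECTL, the render-standby control register and CRSTANDVID to
 * show how hardware/software frequency control and render standby are set up.
 */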
static int i915_drpc_info(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 rgvmodectl = I915_READ(MEMMODECTL);
        u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
        u16 crstandvid = I915_READ16(CRSTANDVID);

        seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
                   "yes" : "no");
        seq_printf(m, "Boost freq: %d\n",
                   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
                   MEMMODE_BOOST_FREQ_SHIFT);
        seq_printf(m, "HW control enabled: %s\n",
                   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
        seq_printf(m, "SW control enabled: %s\n",
                   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
        seq_printf(m, "Gated voltage change: %s\n",
                   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
        seq_printf(m, "Starting frequency: P%d\n",
                   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
        seq_printf(m, "Max P-state: P%d\n",
                   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
        seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
        seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
        seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
        seq_printf(m, "Render standby enabled: %s\n",
                   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");

        return 0;
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!I915_HAS_FBC(dev)) {
                seq_printf(m, "FBC unsupported on this chipset\n");
                return 0;
        }

        if (intel_fbc_enabled(dev)) {
                seq_printf(m, "FBC enabled\n");
        } else {
                seq_printf(m, "FBC disabled: ");
                switch (dev_priv->no_fbc_reason) {
                case FBC_NO_OUTPUT:
                        seq_printf(m, "no outputs");
                        break;
                case FBC_STOLEN_TOO_SMALL:
                        seq_printf(m, "not enough stolen memory");
                        break;
                case FBC_UNSUPPORTED_MODE:
                        seq_printf(m, "mode not supported");
                        break;
                case FBC_MODE_TOO_LARGE:
                        seq_printf(m, "mode too large");
                        break;
                case FBC_BAD_PLANE:
                        seq_printf(m, "FBC unsupported on plane");
                        break;
                case FBC_NOT_TILED:
                        seq_printf(m, "scanout buffer not tiled");
                        break;
                case FBC_MULTIPLE_PIPES:
                        seq_printf(m, "multiple pipes are enabled");
                        break;
                default:
                        seq_printf(m, "unknown reason");
                }
                seq_printf(m, "\n");
        }
        return 0;
}

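/* Report whether self-refresh is currently enabled, using the chipset-specific
 * status bit. */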
static int i915_sr_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool sr_enabled = false;

        if (IS_IRONLAKE(dev))
                sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
        else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
                sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
        else if (IS_I915GM(dev))
                sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
        else if (IS_PINEVIEW(dev))
                sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

        seq_printf(m, "self-refresh: %s\n",
                   sr_enabled ? "enabled" : "disabled");

        return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long temp, chipset, gfx;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        temp = i915_mch_val(dev_priv);
        chipset = i915_chipset_val(dev_priv);
        gfx = i915_gfx_val(dev_priv);
        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "GMCH temp: %ld\n", temp);
        seq_printf(m, "Chipset power: %ld\n", chipset);
        seq_printf(m, "GFX power: %ld\n", gfx);
        seq_printf(m, "Total power: %ld\n", chipset + gfx);

        return 0;
}

static int i915_gfxec(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

        return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_opregion *opregion = &dev_priv->opregion;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (opregion->header)
                seq_write(m, opregion->header, OPREGION_SIZE);

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_fbdev *ifbdev;
        struct intel_framebuffer *fb;
        int ret;

        ret = mutex_lock_interruptible(&dev->mode_config.mutex);
        if (ret)
                return ret;

        ifbdev = dev_priv->fbdev;
        fb = to_intel_framebuffer(ifbdev->helper.fb);

        seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
                   fb->base.width,
                   fb->base.height,
                   fb->base.depth,
                   fb->base.bits_per_pixel);
        describe_obj(m, to_intel_bo(fb->obj));
        seq_printf(m, "\n");

        list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
                if (&fb->base == ifbdev->helper.fb)
                        continue;

                seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
                           fb->base.width,
                           fb->base.height,
                           fb->base.depth,
                           fb->base.bits_per_pixel);
                describe_obj(m, to_intel_bo(fb->obj));
                seq_printf(m, "\n");
        }

        mutex_unlock(&dev->mode_config.mutex);

        return 0;
}

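/*
 * i915_wedged: a read/write debugfs file exposing dev_priv->mm.wedged.
 * Writing a non-zero value marks the GPU as wedged, wakes any waiters and
 * queues the error handling work, allowing a hang to be injected manually.
 */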
static int
i915_wedged_open(struct inode *inode,
                 struct file *filp)
{
        filp->private_data = inode->i_private;
        return 0;
}

static ssize_t
i915_wedged_read(struct file *filp,
                 char __user *ubuf,
                 size_t max,
                 loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[80];
        int len;

        len = snprintf(buf, sizeof (buf),
                       "wedged :  %d\n",
                       atomic_read(&dev_priv->mm.wedged));

        if (len > sizeof (buf))
                len = sizeof (buf);

        return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_wedged_write(struct file *filp,
                  const char __user *ubuf,
                  size_t cnt,
                  loff_t *ppos)
{
        struct drm_device *dev = filp->private_data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        char buf[20];
        int val = 1;

        if (cnt > 0) {
                if (cnt > sizeof (buf) - 1)
                        return -EINVAL;

                if (copy_from_user(buf, ubuf, cnt))
                        return -EFAULT;
                buf[cnt] = 0;

                val = simple_strtoul(buf, NULL, 0);
        }

        DRM_INFO("Manually setting wedged to %d\n", val);

        atomic_set(&dev_priv->mm.wedged, val);
        if (val) {
                wake_up_all(&dev_priv->irq_queue);
                queue_work(dev_priv->wq, &dev_priv->error_work);
        }

        return cnt;
}

static const struct file_operations i915_wedged_fops = {
        .owner = THIS_MODULE,
        .open = i915_wedged_open,
        .read = i915_wedged_read,
        .write = i915_wedged_write,
};

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
                       struct dentry *ent,
                       const void *key)
{
        struct drm_info_node *node;

        node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
        if (node == NULL) {
                debugfs_remove(ent);
                return -ENOMEM;
        }

        node->minor = minor;
        node->dent = ent;
        node->info_ent = (void *) key;
        list_add(&node->list, &minor->debugfs_nodes.list);

        return 0;
}

static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
{
        struct drm_device *dev = minor->dev;
        struct dentry *ent;

        ent = debugfs_create_file("i915_wedged",
                                  S_IRUGO | S_IWUSR,
                                  root, dev,
                                  &i915_wedged_fops);
        if (IS_ERR(ent))
                return PTR_ERR(ent);

        return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
}

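/* Table of read-only debugfs entries created under the DRM minor's debugfs
 * directory by i915_debugfs_init(). */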
static struct drm_info_list i915_debugfs_list[] = {
        {"i915_capabilities", i915_capabilities, 0, 0},
        {"i915_gem_render_active", i915_gem_object_list_info, 0, (void *) RENDER_LIST},
        {"i915_gem_bsd_active", i915_gem_object_list_info, 0, (void *) BSD_LIST},
        {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
        {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
        {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
        {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
        {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
        {"i915_gem_interrupt", i915_interrupt_info, 0},
        {"i915_gem_hws", i915_hws_info, 0},
        {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
        {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
        {"i915_batchbuffers", i915_batchbuffer_info, 0},
        {"i915_error_state", i915_error_state, 0},
        {"i915_rstdby_delays", i915_rstdby_delays, 0},
        {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
        {"i915_delayfreq_table", i915_delayfreq_table, 0},
        {"i915_inttoext_table", i915_inttoext_table, 0},
        {"i915_drpc_info", i915_drpc_info, 0},
        {"i915_emon_status", i915_emon_status, 0},
        {"i915_gfxec", i915_gfxec, 0},
        {"i915_fbc_status", i915_fbc_status, 0},
        {"i915_sr_status", i915_sr_status, 0},
        {"i915_opregion", i915_opregion, 0},
        {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

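/* Create the i915_wedged file and all entries in i915_debugfs_list under the
 * minor's debugfs root; i915_debugfs_cleanup() removes them again. */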
int i915_debugfs_init(struct drm_minor *minor)
{
        int ret;

        ret = i915_wedged_create(minor->debugfs_root, minor);
        if (ret)
                return ret;

        return drm_debugfs_create_files(i915_debugfs_list,
                                        I915_DEBUGFS_ENTRIES,
                                        minor->debugfs_root, minor);
}

void i915_debugfs_cleanup(struct drm_minor *minor)
{
        drm_debugfs_remove_files(i915_debugfs_list,
                                 I915_DEBUGFS_ENTRIES, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
                                 1, minor);
}

#endif /* CONFIG_DEBUG_FS */