]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/gpu/drm/i915/i915_debugfs.c
drm/i915: Kconfig option to disable the legacy fbdev support
[karo-tx-linux.git] / drivers / gpu / drm / i915 / i915_debugfs.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
29 #include <linux/seq_file.h>
30 #include <linux/debugfs.h>
31 #include <linux/slab.h>
32 #include <linux/export.h>
33 #include <linux/list_sort.h>
34 #include <asm/msr-index.h>
35 #include <drm/drmP.h>
36 #include "intel_drv.h"
37 #include "intel_ringbuffer.h"
38 #include <drm/i915_drm.h>
39 #include "i915_drv.h"
40
41 #if defined(CONFIG_DEBUG_FS)
42
/* Selectors passed via drm_info_list.data to pick which object list a
 * debugfs entry dumps (see i915_gem_object_list_info / i915_gem_gtt_info). */
enum {
        ACTIVE_LIST,
        INACTIVE_LIST,
        PINNED_LIST,
};
48
/* Render a boolean-ish int as "yes" (non-zero) or "no" (zero). */
static const char *yesno(int v)
{
        if (v)
                return "yes";
        return "no";
}
53
/*
 * debugfs i915_capabilities: print the device generation, the PCH type,
 * and one "flag: yes/no" line for every boolean in struct
 * intel_device_info (lines generated by DEV_INFO_FOR_EACH_FLAG).
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        const struct intel_device_info *info = INTEL_INFO(dev);

        seq_printf(m, "gen: %d\n", info->gen);
        seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
        DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

        return 0;
}
70
71 static const char *get_pin_flag(struct drm_i915_gem_object *obj)
72 {
73         if (obj->user_pin_count > 0)
74                 return "P";
75         else if (obj->pin_count > 0)
76                 return "p";
77         else
78                 return " ";
79 }
80
81 static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
82 {
83         switch (obj->tiling_mode) {
84         default:
85         case I915_TILING_NONE: return " ";
86         case I915_TILING_X: return "X";
87         case I915_TILING_Y: return "Y";
88         }
89 }
90
91 static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
92 {
93         return obj->has_global_gtt_mapping ? "g" : " ";
94 }
95
/*
 * Print a one-line summary of a GEM object: kernel pointer, the
 * pin/tiling/global-GTT flag characters, size, read/write domains, last
 * read/write/fence seqnos, cache level and dirty/purgeable state,
 * followed by optional details: flink name, pin count, display pin,
 * fence register, each VMA's GTT binding, stolen offset, mappability
 * and the last ring used.
 *
 * NOTE(review): every caller in this file takes dev->struct_mutex
 * first, so the obj/vma lists appear to require it held - confirm.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;
        seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
                   &obj->base,
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
                   get_global_flag(obj),
                   obj->base.size / 1024,
                   obj->base.read_domains,
                   obj->base.write_domain,
                   obj->last_read_seqno,
                   obj->last_write_seqno,
                   obj->last_fenced_seqno,
                   i915_cache_level_str(obj->cache_level),
                   obj->dirty ? " dirty" : "",
                   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
        if (obj->pin_count)
                seq_printf(m, " (pinned x %d)", obj->pin_count);
        if (obj->pin_display)
                seq_printf(m, " (display)");
        if (obj->fence_reg != I915_FENCE_REG_NONE)
                seq_printf(m, " (fence: %d)", obj->fence_reg);
        list_for_each_entry(vma, &obj->vma_list, vma_link) {
                /* "(ppgtt ...)" for a per-process VM, "(ggtt ...)" for the
                 * global GTT binding. */
                if (!i915_is_ggtt(vma->vm))
                        seq_puts(m, " (pp");
                else
                        seq_puts(m, " (g");
                seq_printf(m, "gtt offset: %08lx, size: %08lx)",
                           vma->node.start, vma->node.size);
        }
        if (obj->stolen)
                seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
        if (obj->pin_mappable || obj->fault_mappable) {
                /* Build "p", "f" or "pf" depending on why the object must
                 * stay in the mappable aperture. */
                char s[3], *t = s;
                if (obj->pin_mappable)
                        *t++ = 'p';
                if (obj->fault_mappable)
                        *t++ = 'f';
                *t = '\0';
                seq_printf(m, " (%s mappable)", s);
        }
        if (obj->ring != NULL)
                seq_printf(m, " (%s)", obj->ring->name);
}
144
145 static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
146 {
147         seq_putc(m, ctx->is_initialized ? 'I' : 'i');
148         seq_putc(m, ctx->remap_slice ? 'R' : 'r');
149         seq_putc(m, ' ');
150 }
151
152 static int i915_gem_object_list_info(struct seq_file *m, void *data)
153 {
154         struct drm_info_node *node = (struct drm_info_node *) m->private;
155         uintptr_t list = (uintptr_t) node->info_ent->data;
156         struct list_head *head;
157         struct drm_device *dev = node->minor->dev;
158         struct drm_i915_private *dev_priv = dev->dev_private;
159         struct i915_address_space *vm = &dev_priv->gtt.base;
160         struct i915_vma *vma;
161         size_t total_obj_size, total_gtt_size;
162         int count, ret;
163
164         ret = mutex_lock_interruptible(&dev->struct_mutex);
165         if (ret)
166                 return ret;
167
168         /* FIXME: the user of this interface might want more than just GGTT */
169         switch (list) {
170         case ACTIVE_LIST:
171                 seq_puts(m, "Active:\n");
172                 head = &vm->active_list;
173                 break;
174         case INACTIVE_LIST:
175                 seq_puts(m, "Inactive:\n");
176                 head = &vm->inactive_list;
177                 break;
178         default:
179                 mutex_unlock(&dev->struct_mutex);
180                 return -EINVAL;
181         }
182
183         total_obj_size = total_gtt_size = count = 0;
184         list_for_each_entry(vma, head, mm_list) {
185                 seq_printf(m, "   ");
186                 describe_obj(m, vma->obj);
187                 seq_printf(m, "\n");
188                 total_obj_size += vma->obj->base.size;
189                 total_gtt_size += vma->node.size;
190                 count++;
191         }
192         mutex_unlock(&dev->struct_mutex);
193
194         seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
195                    count, total_obj_size, total_gtt_size);
196         return 0;
197 }
198
199 static int obj_rank_by_stolen(void *priv,
200                               struct list_head *A, struct list_head *B)
201 {
202         struct drm_i915_gem_object *a =
203                 container_of(A, struct drm_i915_gem_object, obj_exec_link);
204         struct drm_i915_gem_object *b =
205                 container_of(B, struct drm_i915_gem_object, obj_exec_link);
206
207         return a->stolen->start - b->stolen->start;
208 }
209
/*
 * debugfs i915_gem_stolen: list every object backed by stolen memory,
 * bound or unbound, sorted by stolen offset, followed by totals.
 * Unbound objects add nothing to the GTT-size total since they have no
 * GTT binding.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        size_t total_obj_size, total_gtt_size;
        LIST_HEAD(stolen);
        int count, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        total_obj_size = total_gtt_size = count = 0;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if (obj->stolen == NULL)
                        continue;

                /* NOTE(review): threads objects onto a local list through
                 * obj_exec_link - assumes execbuf cannot be using that
                 * link while struct_mutex is held; confirm. */
                list_add(&obj->obj_exec_link, &stolen);

                total_obj_size += obj->base.size;
                total_gtt_size += i915_gem_obj_ggtt_size(obj);
                count++;
        }
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
                if (obj->stolen == NULL)
                        continue;

                list_add(&obj->obj_exec_link, &stolen);

                total_obj_size += obj->base.size;
                count++;
        }
        list_sort(NULL, &stolen, obj_rank_by_stolen);
        seq_puts(m, "Stolen:\n");
        while (!list_empty(&stolen)) {
                obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
                seq_puts(m, "   ");
                describe_obj(m, obj);
                seq_putc(m, '\n');
                /* Unthread so obj_exec_link is clean for its real users. */
                list_del_init(&obj->obj_exec_link);
        }
        mutex_unlock(&dev->struct_mutex);

        seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
                   count, total_obj_size, total_gtt_size);
        return 0;
}
259
/*
 * Walk a global object list, accumulating into the caller's local
 * variables size/count and - for map_and_fenceable objects - also into
 * mappable_size/mappable_count.  All four locals plus "obj" must exist
 * at the expansion site.
 */
#define count_objects(list, member) do { \
        list_for_each_entry(obj, list, member) { \
                size += i915_gem_obj_ggtt_size(obj); \
                ++count; \
                if (obj->map_and_fenceable) { \
                        mappable_size += i915_gem_obj_ggtt_size(obj); \
                        ++mappable_count; \
                } \
        } \
} while (0)
270
/* Per-client accumulator filled in by per_file_stats(). */
struct file_stats {
        int count;      /* number of objects owned by the client */
        size_t total, active, inactive, unbound;        /* byte totals */
};
275
276 static int per_file_stats(int id, void *ptr, void *data)
277 {
278         struct drm_i915_gem_object *obj = ptr;
279         struct file_stats *stats = data;
280
281         stats->count++;
282         stats->total += obj->base.size;
283
284         if (i915_gem_obj_ggtt_bound(obj)) {
285                 if (!list_empty(&obj->ring_list))
286                         stats->active += obj->base.size;
287                 else
288                         stats->inactive += obj->base.size;
289         } else {
290                 if (!list_empty(&obj->global_list))
291                         stats->unbound += obj->base.size;
292         }
293
294         return 0;
295 }
296
/*
 * Like count_objects(), but walks a list of VMAs: accumulates each
 * vma->obj's GGTT size into size/count and, for map_and_fenceable
 * objects, into mappable_size/mappable_count.  The four locals plus
 * "vma" must exist at the expansion site.
 */
#define count_vmas(list, member) do { \
        list_for_each_entry(vma, list, member) { \
                size += i915_gem_obj_ggtt_size(vma->obj); \
                ++count; \
                if (vma->obj->map_and_fenceable) { \
                        mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
                        ++mappable_count; \
                } \
        } \
} while (0)
307
308 static int i915_gem_object_info(struct seq_file *m, void* data)
309 {
310         struct drm_info_node *node = (struct drm_info_node *) m->private;
311         struct drm_device *dev = node->minor->dev;
312         struct drm_i915_private *dev_priv = dev->dev_private;
313         u32 count, mappable_count, purgeable_count;
314         size_t size, mappable_size, purgeable_size;
315         struct drm_i915_gem_object *obj;
316         struct i915_address_space *vm = &dev_priv->gtt.base;
317         struct drm_file *file;
318         struct i915_vma *vma;
319         int ret;
320
321         ret = mutex_lock_interruptible(&dev->struct_mutex);
322         if (ret)
323                 return ret;
324
325         seq_printf(m, "%u objects, %zu bytes\n",
326                    dev_priv->mm.object_count,
327                    dev_priv->mm.object_memory);
328
329         size = count = mappable_size = mappable_count = 0;
330         count_objects(&dev_priv->mm.bound_list, global_list);
331         seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
332                    count, mappable_count, size, mappable_size);
333
334         size = count = mappable_size = mappable_count = 0;
335         count_vmas(&vm->active_list, mm_list);
336         seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
337                    count, mappable_count, size, mappable_size);
338
339         size = count = mappable_size = mappable_count = 0;
340         count_vmas(&vm->inactive_list, mm_list);
341         seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
342                    count, mappable_count, size, mappable_size);
343
344         size = count = purgeable_size = purgeable_count = 0;
345         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
346                 size += obj->base.size, ++count;
347                 if (obj->madv == I915_MADV_DONTNEED)
348                         purgeable_size += obj->base.size, ++purgeable_count;
349         }
350         seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
351
352         size = count = mappable_size = mappable_count = 0;
353         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
354                 if (obj->fault_mappable) {
355                         size += i915_gem_obj_ggtt_size(obj);
356                         ++count;
357                 }
358                 if (obj->pin_mappable) {
359                         mappable_size += i915_gem_obj_ggtt_size(obj);
360                         ++mappable_count;
361                 }
362                 if (obj->madv == I915_MADV_DONTNEED) {
363                         purgeable_size += obj->base.size;
364                         ++purgeable_count;
365                 }
366         }
367         seq_printf(m, "%u purgeable objects, %zu bytes\n",
368                    purgeable_count, purgeable_size);
369         seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
370                    mappable_count, mappable_size);
371         seq_printf(m, "%u fault mappable objects, %zu bytes\n",
372                    count, size);
373
374         seq_printf(m, "%zu [%lu] gtt total\n",
375                    dev_priv->gtt.base.total,
376                    dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
377
378         seq_putc(m, '\n');
379         list_for_each_entry_reverse(file, &dev->filelist, lhead) {
380                 struct file_stats stats;
381
382                 memset(&stats, 0, sizeof(stats));
383                 idr_for_each(&file->object_idr, per_file_stats, &stats);
384                 seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
385                            get_pid_task(file->pid, PIDTYPE_PID)->comm,
386                            stats.count,
387                            stats.total,
388                            stats.active,
389                            stats.inactive,
390                            stats.unbound);
391         }
392
393         mutex_unlock(&dev->struct_mutex);
394
395         return 0;
396 }
397
398 static int i915_gem_gtt_info(struct seq_file *m, void *data)
399 {
400         struct drm_info_node *node = (struct drm_info_node *) m->private;
401         struct drm_device *dev = node->minor->dev;
402         uintptr_t list = (uintptr_t) node->info_ent->data;
403         struct drm_i915_private *dev_priv = dev->dev_private;
404         struct drm_i915_gem_object *obj;
405         size_t total_obj_size, total_gtt_size;
406         int count, ret;
407
408         ret = mutex_lock_interruptible(&dev->struct_mutex);
409         if (ret)
410                 return ret;
411
412         total_obj_size = total_gtt_size = count = 0;
413         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
414                 if (list == PINNED_LIST && obj->pin_count == 0)
415                         continue;
416
417                 seq_puts(m, "   ");
418                 describe_obj(m, obj);
419                 seq_putc(m, '\n');
420                 total_obj_size += obj->base.size;
421                 total_gtt_size += i915_gem_obj_ggtt_size(obj);
422                 count++;
423         }
424
425         mutex_unlock(&dev->struct_mutex);
426
427         seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
428                    count, total_obj_size, total_gtt_size);
429
430         return 0;
431 }
432
/*
 * debugfs i915_gem_pageflip: for every CRTC report whether a page flip
 * is queued or pending completion, the stall-check state, and the GTT
 * offsets of the old and new framebuffer objects.
 */
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        unsigned long flags;
        struct intel_crtc *crtc;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
                const char pipe = pipe_name(crtc->pipe);
                const char plane = plane_name(crtc->plane);
                struct intel_unpin_work *work;

                /* Sample unpin_work under the event lock; the irqsave
                 * form suggests it is also touched from interrupt
                 * context. */
                spin_lock_irqsave(&dev->event_lock, flags);
                work = crtc->unpin_work;
                if (work == NULL) {
                        seq_printf(m, "No flip due on pipe %c (plane %c)\n",
                                   pipe, plane);
                } else {
                        if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
                                seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
                                           pipe, plane);
                        } else {
                                seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
                                           pipe, plane);
                        }
                        if (work->enable_stall_check)
                                seq_puts(m, "Stall check enabled, ");
                        else
                                seq_puts(m, "Stall check waiting for page flip ioctl, ");
                        seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

                        if (work->old_fb_obj) {
                                struct drm_i915_gem_object *obj = work->old_fb_obj;
                                if (obj)
                                        seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
                                                   i915_gem_obj_ggtt_offset(obj));
                        }
                        if (work->pending_flip_obj) {
                                struct drm_i915_gem_object *obj = work->pending_flip_obj;
                                if (obj)
                                        seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
                                                   i915_gem_obj_ggtt_offset(obj));
                        }
                }
                spin_unlock_irqrestore(&dev->event_lock, flags);
        }

        return 0;
}
482
483 static int i915_gem_request_info(struct seq_file *m, void *data)
484 {
485         struct drm_info_node *node = (struct drm_info_node *) m->private;
486         struct drm_device *dev = node->minor->dev;
487         drm_i915_private_t *dev_priv = dev->dev_private;
488         struct intel_ring_buffer *ring;
489         struct drm_i915_gem_request *gem_request;
490         int ret, count, i;
491
492         ret = mutex_lock_interruptible(&dev->struct_mutex);
493         if (ret)
494                 return ret;
495
496         count = 0;
497         for_each_ring(ring, dev_priv, i) {
498                 if (list_empty(&ring->request_list))
499                         continue;
500
501                 seq_printf(m, "%s requests:\n", ring->name);
502                 list_for_each_entry(gem_request,
503                                     &ring->request_list,
504                                     list) {
505                         seq_printf(m, "    %d @ %d\n",
506                                    gem_request->seqno,
507                                    (int) (jiffies - gem_request->emitted_jiffies));
508                 }
509                 count++;
510         }
511         mutex_unlock(&dev->struct_mutex);
512
513         if (count == 0)
514                 seq_puts(m, "No requests\n");
515
516         return 0;
517 }
518
519 static void i915_ring_seqno_info(struct seq_file *m,
520                                  struct intel_ring_buffer *ring)
521 {
522         if (ring->get_seqno) {
523                 seq_printf(m, "Current sequence (%s): %u\n",
524                            ring->name, ring->get_seqno(ring, false));
525         }
526 }
527
528 static int i915_gem_seqno_info(struct seq_file *m, void *data)
529 {
530         struct drm_info_node *node = (struct drm_info_node *) m->private;
531         struct drm_device *dev = node->minor->dev;
532         drm_i915_private_t *dev_priv = dev->dev_private;
533         struct intel_ring_buffer *ring;
534         int ret, i;
535
536         ret = mutex_lock_interruptible(&dev->struct_mutex);
537         if (ret)
538                 return ret;
539
540         for_each_ring(ring, dev_priv, i)
541                 i915_ring_seqno_info(m, ring);
542
543         mutex_unlock(&dev->struct_mutex);
544
545         return 0;
546 }
547
548
/*
 * debugfs i915_interrupt_info: dump the interrupt registers.  The set
 * of registers depends on the platform: Valleyview display/GT/PM
 * blocks, the single IER/IIR/IMR set on pre-PCH-split hardware, or the
 * north/south display plus GT registers on PCH-split platforms.  Also
 * prints the received-interrupt counter and, per ring, the IMR (gen6/7
 * only) and current seqno.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        int ret, i, pipe;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (IS_VALLEYVIEW(dev)) {
                /* VLV keeps display, master, render (GT) and PM interrupt
                 * registers in separate blocks. */
                seq_printf(m, "Display IER:\t%08x\n",
                           I915_READ(VLV_IER));
                seq_printf(m, "Display IIR:\t%08x\n",
                           I915_READ(VLV_IIR));
                seq_printf(m, "Display IIR_RW:\t%08x\n",
                           I915_READ(VLV_IIR_RW));
                seq_printf(m, "Display IMR:\t%08x\n",
                           I915_READ(VLV_IMR));
                for_each_pipe(pipe)
                        seq_printf(m, "Pipe %c stat:\t%08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));

                seq_printf(m, "Master IER:\t%08x\n",
                           I915_READ(VLV_MASTER_IER));

                seq_printf(m, "Render IER:\t%08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Render IIR:\t%08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Render IMR:\t%08x\n",
                           I915_READ(GTIMR));

                seq_printf(m, "PM IER:\t\t%08x\n",
                           I915_READ(GEN6_PMIER));
                seq_printf(m, "PM IIR:\t\t%08x\n",
                           I915_READ(GEN6_PMIIR));
                seq_printf(m, "PM IMR:\t\t%08x\n",
                           I915_READ(GEN6_PMIMR));

                seq_printf(m, "Port hotplug:\t%08x\n",
                           I915_READ(PORT_HOTPLUG_EN));
                seq_printf(m, "DPFLIPSTAT:\t%08x\n",
                           I915_READ(VLV_DPFLIPSTAT));
                seq_printf(m, "DPINVGTT:\t%08x\n",
                           I915_READ(DPINVGTT));

        } else if (!HAS_PCH_SPLIT(dev)) {
                /* Pre-Ironlake: one combined IER/IIR/IMR set. */
                seq_printf(m, "Interrupt enable:    %08x\n",
                           I915_READ(IER));
                seq_printf(m, "Interrupt identity:  %08x\n",
                           I915_READ(IIR));
                seq_printf(m, "Interrupt mask:      %08x\n",
                           I915_READ(IMR));
                for_each_pipe(pipe)
                        seq_printf(m, "Pipe %c stat:         %08x\n",
                                   pipe_name(pipe),
                                   I915_READ(PIPESTAT(pipe)));
        } else {
                /* PCH-split: separate north (CPU) and south (PCH) display
                 * interrupt registers plus the GT set. */
                seq_printf(m, "North Display Interrupt enable:          %08x\n",
                           I915_READ(DEIER));
                seq_printf(m, "North Display Interrupt identity:        %08x\n",
                           I915_READ(DEIIR));
                seq_printf(m, "North Display Interrupt mask:            %08x\n",
                           I915_READ(DEIMR));
                seq_printf(m, "South Display Interrupt enable:          %08x\n",
                           I915_READ(SDEIER));
                seq_printf(m, "South Display Interrupt identity:        %08x\n",
                           I915_READ(SDEIIR));
                seq_printf(m, "South Display Interrupt mask:            %08x\n",
                           I915_READ(SDEIMR));
                seq_printf(m, "Graphics Interrupt enable:               %08x\n",
                           I915_READ(GTIER));
                seq_printf(m, "Graphics Interrupt identity:             %08x\n",
                           I915_READ(GTIIR));
                seq_printf(m, "Graphics Interrupt mask:         %08x\n",
                           I915_READ(GTIMR));
        }
        seq_printf(m, "Interrupts received: %d\n",
                   atomic_read(&dev_priv->irq_received));
        for_each_ring(ring, dev_priv, i) {
                if (IS_GEN6(dev) || IS_GEN7(dev)) {
                        seq_printf(m,
                                   "Graphics Interrupt mask (%s):       %08x\n",
                                   ring->name, I915_READ_IMR(ring));
                }
                i915_ring_seqno_info(m, ring);
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
644
645 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
646 {
647         struct drm_info_node *node = (struct drm_info_node *) m->private;
648         struct drm_device *dev = node->minor->dev;
649         drm_i915_private_t *dev_priv = dev->dev_private;
650         int i, ret;
651
652         ret = mutex_lock_interruptible(&dev->struct_mutex);
653         if (ret)
654                 return ret;
655
656         seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
657         seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
658         for (i = 0; i < dev_priv->num_fence_regs; i++) {
659                 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
660
661                 seq_printf(m, "Fence %d, pin count = %d, object = ",
662                            i, dev_priv->fence_regs[i].pin_count);
663                 if (obj == NULL)
664                         seq_puts(m, "unused");
665                 else
666                         describe_obj(m, obj);
667                 seq_putc(m, '\n');
668         }
669
670         mutex_unlock(&dev->struct_mutex);
671         return 0;
672 }
673
/*
 * debugfs i915_*_hws: hex-dump the hardware status page of the ring
 * selected by node->info_ent->data, four dwords per line; silently does
 * nothing if the ring has no status page mapped.
 *
 * NOTE(review): the bound 4096 / sizeof(u32) / 4 together with the
 * i += 4 step dumps only the first 1KiB of the 4KiB page - confirm
 * whether that is intentional.
 */
static int i915_hws_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        const u32 *hws;
        int i;

        ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
        hws = ring->status_page.page_addr;
        if (hws == NULL)
                return 0;

        for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
                seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                           i * 4,
                           hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
        }
        return 0;
}
695
696 static ssize_t
697 i915_error_state_write(struct file *filp,
698                        const char __user *ubuf,
699                        size_t cnt,
700                        loff_t *ppos)
701 {
702         struct i915_error_state_file_priv *error_priv = filp->private_data;
703         struct drm_device *dev = error_priv->dev;
704         int ret;
705
706         DRM_DEBUG_DRIVER("Resetting error state\n");
707
708         ret = mutex_lock_interruptible(&dev->struct_mutex);
709         if (ret)
710                 return ret;
711
712         i915_destroy_error_state(dev);
713         mutex_unlock(&dev->struct_mutex);
714
715         return cnt;
716 }
717
718 static int i915_error_state_open(struct inode *inode, struct file *file)
719 {
720         struct drm_device *dev = inode->i_private;
721         struct i915_error_state_file_priv *error_priv;
722
723         error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
724         if (!error_priv)
725                 return -ENOMEM;
726
727         error_priv->dev = dev;
728
729         i915_error_state_get(dev, error_priv);
730
731         file->private_data = error_priv;
732
733         return 0;
734 }
735
736 static int i915_error_state_release(struct inode *inode, struct file *file)
737 {
738         struct i915_error_state_file_priv *error_priv = file->private_data;
739
740         i915_error_state_put(error_priv);
741         kfree(error_priv);
742
743         return 0;
744 }
745
/*
 * Read handler for i915_error_state: format the captured error state
 * into a temporary buffer positioned at *pos, copy it to userspace and
 * advance *pos.  Returns the number of bytes copied or a negative
 * errno.
 */
static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
                                     size_t count, loff_t *pos)
{
        struct i915_error_state_file_priv *error_priv = file->private_data;
        struct drm_i915_error_state_buf error_str;
        loff_t tmp_pos = 0;
        ssize_t ret_count = 0;
        int ret;

        ret = i915_error_state_buf_init(&error_str, count, *pos);
        if (ret)
                return ret;

        ret = i915_error_state_to_str(&error_str, error_priv);
        if (ret)
                goto out;

        /* error_str already starts at *pos, so copy from offset 0 and
         * translate back into an absolute file position afterwards. */
        ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
                                            error_str.buf,
                                            error_str.bytes);

        if (ret_count < 0)
                ret = ret_count;
        else
                *pos = error_str.start + ret_count;
out:
        i915_error_state_buf_release(&error_str);
        return ret ?: ret_count;
}
775
/* File operations for the i915_error_state debugfs node: read dumps the
 * captured error state, any write clears it. */
static const struct file_operations i915_error_state_fops = {
        .owner = THIS_MODULE,
        .open = i915_error_state_open,
        .read = i915_error_state_read,
        .write = i915_error_state_write,
        .llseek = default_llseek,
        .release = i915_error_state_release,
};
784
785 static int
786 i915_next_seqno_get(void *data, u64 *val)
787 {
788         struct drm_device *dev = data;
789         drm_i915_private_t *dev_priv = dev->dev_private;
790         int ret;
791
792         ret = mutex_lock_interruptible(&dev->struct_mutex);
793         if (ret)
794                 return ret;
795
796         *val = dev_priv->next_seqno;
797         mutex_unlock(&dev->struct_mutex);
798
799         return 0;
800 }
801
802 static int
803 i915_next_seqno_set(void *data, u64 val)
804 {
805         struct drm_device *dev = data;
806         int ret;
807
808         ret = mutex_lock_interruptible(&dev->struct_mutex);
809         if (ret)
810                 return ret;
811
812         ret = i915_gem_set_seqno(dev, val);
813         mutex_unlock(&dev->struct_mutex);
814
815         return ret;
816 }
817
/* debugfs attribute exposing the next request seqno in hex. */
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
                        i915_next_seqno_get, i915_next_seqno_set,
                        "0x%llx\n");
821
822 static int i915_rstdby_delays(struct seq_file *m, void *unused)
823 {
824         struct drm_info_node *node = (struct drm_info_node *) m->private;
825         struct drm_device *dev = node->minor->dev;
826         drm_i915_private_t *dev_priv = dev->dev_private;
827         u16 crstanddelay;
828         int ret;
829
830         ret = mutex_lock_interruptible(&dev->struct_mutex);
831         if (ret)
832                 return ret;
833
834         crstanddelay = I915_READ16(CRSTANDVID);
835
836         mutex_unlock(&dev->struct_mutex);
837
838         seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
839
840         return 0;
841 }
842
/*
 * Dump the current GPU frequency/P-state information, with a per-platform
 * register decode: ILK (gen5), SNB/IVB/HSW (gen6/7, non-VLV), and VLV.
 */
static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        /* Make sure deferred RPS initialisation has finished first. */
        flush_delayed_work(&dev_priv->rps.delayed_resume_work);

        if (IS_GEN5(dev)) {
                u16 rgvswctl = I915_READ16(MEMSWCTL);
                u16 rgvstat = I915_READ16(MEMSTAT_ILK);

                seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
                seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
                seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
                           MEMSTAT_VID_SHIFT);
                seq_printf(m, "Current P-state: %d\n",
                           (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
        } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
                u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
                u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
                u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                u32 rpstat, cagf, reqf;
                u32 rpupei, rpcurup, rpprevup;
                u32 rpdownei, rpcurdown, rpprevdown;
                int max_freq;

                /* RPSTAT1 is in the GT power well */
                ret = mutex_lock_interruptible(&dev->struct_mutex);
                if (ret)
                        return ret;

                gen6_gt_force_wake_get(dev_priv);

                /* Requested frequency: the field position differs on HSW. */
                reqf = I915_READ(GEN6_RPNSWREQ);
                reqf &= ~GEN6_TURBO_DISABLE;
                if (IS_HASWELL(dev))
                        reqf >>= 24;
                else
                        reqf >>= 25;
                reqf *= GT_FREQUENCY_MULTIPLIER;

                rpstat = I915_READ(GEN6_RPSTAT1);
                rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
                rpcurup = I915_READ(GEN6_RP_CUR_UP);
                rpprevup = I915_READ(GEN6_RP_PREV_UP);
                rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
                rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
                rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
                /* CAGF (current actual GPU frequency) mask also differs on HSW. */
                if (IS_HASWELL(dev))
                        cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
                else
                        cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
                cagf *= GT_FREQUENCY_MULTIPLIER;

                gen6_gt_force_wake_put(dev_priv);
                mutex_unlock(&dev->struct_mutex);

                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
                seq_printf(m, "Render p-state ratio: %d\n",
                           (gt_perf_status & 0xff00) >> 8);
                seq_printf(m, "Render p-state VID: %d\n",
                           gt_perf_status & 0xff);
                seq_printf(m, "Render p-state limit: %d\n",
                           rp_state_limits & 0xff);
                seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
                seq_printf(m, "CAGF: %dMHz\n", cagf);
                seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
                           GEN6_CURICONT_MASK);
                seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
                           GEN6_CURBSYTAVG_MASK);
                seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
                           GEN6_CURBSYTAVG_MASK);
                seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
                           GEN6_CURIAVG_MASK);
                seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
                           GEN6_CURBSYTAVG_MASK);
                seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
                           GEN6_CURBSYTAVG_MASK);

                /* RP_STATE_CAP packs RPN/RP1/RP0 ratios into bytes 2/1/0. */
                max_freq = (rp_state_cap & 0xff0000) >> 16;
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           max_freq * GT_FREQUENCY_MULTIPLIER);

                max_freq = (rp_state_cap & 0xff00) >> 8;
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           max_freq * GT_FREQUENCY_MULTIPLIER);

                max_freq = rp_state_cap & 0xff;
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           max_freq * GT_FREQUENCY_MULTIPLIER);

                seq_printf(m, "Max overclocked frequency: %dMHz\n",
                           dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
        } else if (IS_VALLEYVIEW(dev)) {
                u32 freq_sts, val;

                /* VLV reads frequency state via the punit sideband. */
                mutex_lock(&dev_priv->rps.hw_lock);
                freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
                seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

                val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
                seq_printf(m, "max GPU freq: %d MHz\n",
                           vlv_gpu_freq(dev_priv->mem_freq, val));

                val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
                seq_printf(m, "min GPU freq: %d MHz\n",
                           vlv_gpu_freq(dev_priv->mem_freq, val));

                seq_printf(m, "current GPU freq: %d MHz\n",
                           vlv_gpu_freq(dev_priv->mem_freq,
                                        (freq_sts >> 8) & 0xff));
                mutex_unlock(&dev_priv->rps.hw_lock);
        } else {
                seq_puts(m, "no P-state info available\n");
        }

        return 0;
}
965
966 static int i915_delayfreq_table(struct seq_file *m, void *unused)
967 {
968         struct drm_info_node *node = (struct drm_info_node *) m->private;
969         struct drm_device *dev = node->minor->dev;
970         drm_i915_private_t *dev_priv = dev->dev_private;
971         u32 delayfreq;
972         int ret, i;
973
974         ret = mutex_lock_interruptible(&dev->struct_mutex);
975         if (ret)
976                 return ret;
977
978         for (i = 0; i < 16; i++) {
979                 delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
980                 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
981                            (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
982         }
983
984         mutex_unlock(&dev->struct_mutex);
985
986         return 0;
987 }
988
/* Convert a VID map value to millivolts: 1250mV at map 0, -25mV per step. */
static inline int MAP_TO_MV(int map)
{
	return 1250 - 25 * map;
}
993
994 static int i915_inttoext_table(struct seq_file *m, void *unused)
995 {
996         struct drm_info_node *node = (struct drm_info_node *) m->private;
997         struct drm_device *dev = node->minor->dev;
998         drm_i915_private_t *dev_priv = dev->dev_private;
999         u32 inttoext;
1000         int ret, i;
1001
1002         ret = mutex_lock_interruptible(&dev->struct_mutex);
1003         if (ret)
1004                 return ret;
1005
1006         for (i = 1; i <= 32; i++) {
1007                 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
1008                 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
1009         }
1010
1011         mutex_unlock(&dev->struct_mutex);
1012
1013         return 0;
1014 }
1015
/*
 * Dump Ironlake DRPC (render power/clock) state decoded from MEMMODECTL,
 * RSTDBYCTL and CRSTANDVID.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 rgvmodectl, rstdbyctl;
        u16 crstandvid;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        rgvmodectl = I915_READ(MEMMODECTL);
        rstdbyctl = I915_READ(RSTDBYCTL);
        crstandvid = I915_READ16(CRSTANDVID);

        mutex_unlock(&dev->struct_mutex);

        /* MEMMODECTL field decode: boost, HW/SW control, P-state limits. */
        seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
                   "yes" : "no");
        seq_printf(m, "Boost freq: %d\n",
                   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
                   MEMMODE_BOOST_FREQ_SHIFT);
        seq_printf(m, "HW control enabled: %s\n",
                   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
        seq_printf(m, "SW control enabled: %s\n",
                   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
        seq_printf(m, "Gated voltage change: %s\n",
                   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
        seq_printf(m, "Starting frequency: P%d\n",
                   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
        seq_printf(m, "Max P-state: P%d\n",
                   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
        seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
        seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
        seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
        /* Note inverted sense: RCX_SW_EXIT set means standby is disabled. */
        seq_printf(m, "Render standby enabled: %s\n",
                   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
        seq_puts(m, "Current RS state: ");
        switch (rstdbyctl & RSX_STATUS_MASK) {
        case RSX_STATUS_ON:
                seq_puts(m, "on\n");
                break;
        case RSX_STATUS_RC1:
                seq_puts(m, "RC1\n");
                break;
        case RSX_STATUS_RC1E:
                seq_puts(m, "RC1E\n");
                break;
        case RSX_STATUS_RS1:
                seq_puts(m, "RS1\n");
                break;
        case RSX_STATUS_RS2:
                seq_puts(m, "RS2 (RC6)\n");
                break;
        case RSX_STATUS_RS3:
                seq_puts(m, "RC3 (RC6+)\n");
                break;
        default:
                seq_puts(m, "unknown\n");
                break;
        }

        return 0;
}
1082
/*
 * Dump gen6+ RC6 (render C-state) status: RP/RC control registers, the
 * current RC state from GT_CORE_STATUS, residency counters and the RC6
 * voltages reported by the pcode mailbox.
 */
static int gen6_drpc_info(struct seq_file *m)
{

        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
        unsigned forcewake_count;
        int count = 0, ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        spin_lock_irq(&dev_priv->uncore.lock);
        forcewake_count = dev_priv->uncore.forcewake_count;
        spin_unlock_irq(&dev_priv->uncore.lock);

        if (forcewake_count) {
                seq_puts(m, "RC information inaccurate because somebody "
                            "holds a forcewake reference \n");
        } else {
                /* NB: we cannot use forcewake, else we read the wrong values */
                /* Wait up to 50 * 10us for the GT to release the wake ack;
                 * count reaches 51 only if it never did. */
                while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
                        udelay(10);
                seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
        }

        /* Raw readl to bypass the forcewake-tracked register accessors. */
        gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
        trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

        rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
        rcctl1 = I915_READ(GEN6_RC_CONTROL);
        mutex_unlock(&dev->struct_mutex);
        /* rc6vids comes from pcode, which needs rps.hw_lock instead. */
        mutex_lock(&dev_priv->rps.hw_lock);
        sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
        mutex_unlock(&dev_priv->rps.hw_lock);

        seq_printf(m, "Video Turbo Mode: %s\n",
                   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
        seq_printf(m, "HW control enabled: %s\n",
                   yesno(rpmodectl1 & GEN6_RP_ENABLE));
        seq_printf(m, "SW control enabled: %s\n",
                   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
                          GEN6_RP_MEDIA_SW_MODE));
        seq_printf(m, "RC1e Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
        seq_printf(m, "RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
        seq_printf(m, "Deep RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
        seq_printf(m, "Deepest RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
        seq_puts(m, "Current RC state: ");
        switch (gt_core_status & GEN6_RCn_MASK) {
        case GEN6_RC0:
                if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
                        seq_puts(m, "Core Power Down\n");
                else
                        seq_puts(m, "on\n");
                break;
        case GEN6_RC3:
                seq_puts(m, "RC3\n");
                break;
        case GEN6_RC6:
                seq_puts(m, "RC6\n");
                break;
        case GEN6_RC7:
                seq_puts(m, "RC7\n");
                break;
        default:
                seq_puts(m, "Unknown\n");
                break;
        }

        seq_printf(m, "Core Power Down: %s\n",
                   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

        /* Not exactly sure what this is */
        seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
                   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
        seq_printf(m, "RC6 residency since boot: %u\n",
                   I915_READ(GEN6_GT_GFX_RC6));
        seq_printf(m, "RC6+ residency since boot: %u\n",
                   I915_READ(GEN6_GT_GFX_RC6p));
        seq_printf(m, "RC6++ residency since boot: %u\n",
                   I915_READ(GEN6_GT_GFX_RC6pp));

        /* rc6vids packs the RC6/RC6+/RC6++ VIDs into successive bytes. */
        seq_printf(m, "RC6   voltage: %dmV\n",
                   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
        seq_printf(m, "RC6+  voltage: %dmV\n",
                   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
        seq_printf(m, "RC6++ voltage: %dmV\n",
                   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
        return 0;
}
1179
1180 static int i915_drpc_info(struct seq_file *m, void *unused)
1181 {
1182         struct drm_info_node *node = (struct drm_info_node *) m->private;
1183         struct drm_device *dev = node->minor->dev;
1184
1185         if (IS_GEN6(dev) || IS_GEN7(dev))
1186                 return gen6_drpc_info(m);
1187         else
1188                 return ironlake_drpc_info(m);
1189 }
1190
1191 static int i915_fbc_status(struct seq_file *m, void *unused)
1192 {
1193         struct drm_info_node *node = (struct drm_info_node *) m->private;
1194         struct drm_device *dev = node->minor->dev;
1195         drm_i915_private_t *dev_priv = dev->dev_private;
1196
1197         if (!I915_HAS_FBC(dev)) {
1198                 seq_puts(m, "FBC unsupported on this chipset\n");
1199                 return 0;
1200         }
1201
1202         if (intel_fbc_enabled(dev)) {
1203                 seq_puts(m, "FBC enabled\n");
1204         } else {
1205                 seq_puts(m, "FBC disabled: ");
1206                 switch (dev_priv->fbc.no_fbc_reason) {
1207                 case FBC_OK:
1208                         seq_puts(m, "FBC actived, but currently disabled in hardware");
1209                         break;
1210                 case FBC_UNSUPPORTED:
1211                         seq_puts(m, "unsupported by this chipset");
1212                         break;
1213                 case FBC_NO_OUTPUT:
1214                         seq_puts(m, "no outputs");
1215                         break;
1216                 case FBC_STOLEN_TOO_SMALL:
1217                         seq_puts(m, "not enough stolen memory");
1218                         break;
1219                 case FBC_UNSUPPORTED_MODE:
1220                         seq_puts(m, "mode not supported");
1221                         break;
1222                 case FBC_MODE_TOO_LARGE:
1223                         seq_puts(m, "mode too large");
1224                         break;
1225                 case FBC_BAD_PLANE:
1226                         seq_puts(m, "FBC unsupported on plane");
1227                         break;
1228                 case FBC_NOT_TILED:
1229                         seq_puts(m, "scanout buffer not tiled");
1230                         break;
1231                 case FBC_MULTIPLE_PIPES:
1232                         seq_puts(m, "multiple pipes are enabled");
1233                         break;
1234                 case FBC_MODULE_PARAM:
1235                         seq_puts(m, "disabled per module param (default off)");
1236                         break;
1237                 case FBC_CHIP_DEFAULT:
1238                         seq_puts(m, "disabled per chip default");
1239                         break;
1240                 default:
1241                         seq_puts(m, "unknown reason");
1242                 }
1243                 seq_putc(m, '\n');
1244         }
1245         return 0;
1246 }
1247
1248 static int i915_ips_status(struct seq_file *m, void *unused)
1249 {
1250         struct drm_info_node *node = (struct drm_info_node *) m->private;
1251         struct drm_device *dev = node->minor->dev;
1252         struct drm_i915_private *dev_priv = dev->dev_private;
1253
1254         if (!HAS_IPS(dev)) {
1255                 seq_puts(m, "not supported\n");
1256                 return 0;
1257         }
1258
1259         if (I915_READ(IPS_CTL) & IPS_ENABLE)
1260                 seq_puts(m, "enabled\n");
1261         else
1262                 seq_puts(m, "disabled\n");
1263
1264         return 0;
1265 }
1266
1267 static int i915_sr_status(struct seq_file *m, void *unused)
1268 {
1269         struct drm_info_node *node = (struct drm_info_node *) m->private;
1270         struct drm_device *dev = node->minor->dev;
1271         drm_i915_private_t *dev_priv = dev->dev_private;
1272         bool sr_enabled = false;
1273
1274         if (HAS_PCH_SPLIT(dev))
1275                 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1276         else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
1277                 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1278         else if (IS_I915GM(dev))
1279                 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1280         else if (IS_PINEVIEW(dev))
1281                 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1282
1283         seq_printf(m, "self-refresh: %s\n",
1284                    sr_enabled ? "enabled" : "disabled");
1285
1286         return 0;
1287 }
1288
1289 static int i915_emon_status(struct seq_file *m, void *unused)
1290 {
1291         struct drm_info_node *node = (struct drm_info_node *) m->private;
1292         struct drm_device *dev = node->minor->dev;
1293         drm_i915_private_t *dev_priv = dev->dev_private;
1294         unsigned long temp, chipset, gfx;
1295         int ret;
1296
1297         if (!IS_GEN5(dev))
1298                 return -ENODEV;
1299
1300         ret = mutex_lock_interruptible(&dev->struct_mutex);
1301         if (ret)
1302                 return ret;
1303
1304         temp = i915_mch_val(dev_priv);
1305         chipset = i915_chipset_val(dev_priv);
1306         gfx = i915_gfx_val(dev_priv);
1307         mutex_unlock(&dev->struct_mutex);
1308
1309         seq_printf(m, "GMCH temp: %ld\n", temp);
1310         seq_printf(m, "Chipset power: %ld\n", chipset);
1311         seq_printf(m, "GFX power: %ld\n", gfx);
1312         seq_printf(m, "Total power: %ld\n", chipset + gfx);
1313
1314         return 0;
1315 }
1316
/*
 * Print the GPU-to-CPU/ring frequency mapping table for gen6/7 by querying
 * the pcode min-freq table for every GPU frequency step.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
        int gpu_freq, ia_freq;

        if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
                seq_puts(m, "unsupported on this chipset\n");
                return 0;
        }

        /* Make sure deferred RPS initialisation has finished first. */
        flush_delayed_work(&dev_priv->rps.delayed_resume_work);

        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;

        seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

        for (gpu_freq = dev_priv->rps.min_delay;
             gpu_freq <= dev_priv->rps.max_delay;
             gpu_freq++) {
                /* ia_freq is both input (GPU step) and output of the query. */
                ia_freq = gpu_freq;
                sandybridge_pcode_read(dev_priv,
                                       GEN6_PCODE_READ_MIN_FREQ_TABLE,
                                       &ia_freq);
                /* Reply packs CPU freq in byte 0, ring freq in byte 1,
                 * both in units of 100MHz. */
                seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
                           gpu_freq * GT_FREQUENCY_MULTIPLIER,
                           ((ia_freq >> 0) & 0xff) * 100,
                           ((ia_freq >> 8) & 0xff) * 100);
        }

        mutex_unlock(&dev_priv->rps.hw_lock);

        return 0;
}
1355
1356 static int i915_gfxec(struct seq_file *m, void *unused)
1357 {
1358         struct drm_info_node *node = (struct drm_info_node *) m->private;
1359         struct drm_device *dev = node->minor->dev;
1360         drm_i915_private_t *dev_priv = dev->dev_private;
1361         int ret;
1362
1363         ret = mutex_lock_interruptible(&dev->struct_mutex);
1364         if (ret)
1365                 return ret;
1366
1367         seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
1368
1369         mutex_unlock(&dev->struct_mutex);
1370
1371         return 0;
1372 }
1373
1374 static int i915_opregion(struct seq_file *m, void *unused)
1375 {
1376         struct drm_info_node *node = (struct drm_info_node *) m->private;
1377         struct drm_device *dev = node->minor->dev;
1378         drm_i915_private_t *dev_priv = dev->dev_private;
1379         struct intel_opregion *opregion = &dev_priv->opregion;
1380         void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
1381         int ret;
1382
1383         if (data == NULL)
1384                 return -ENOMEM;
1385
1386         ret = mutex_lock_interruptible(&dev->struct_mutex);
1387         if (ret)
1388                 goto out;
1389
1390         if (opregion->header) {
1391                 memcpy_fromio(data, opregion->header, OPREGION_SIZE);
1392                 seq_write(m, data, OPREGION_SIZE);
1393         }
1394
1395         mutex_unlock(&dev->struct_mutex);
1396
1397 out:
1398         kfree(data);
1399         return 0;
1400 }
1401
1402 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1403 {
1404         struct drm_info_node *node = (struct drm_info_node *) m->private;
1405         struct drm_device *dev = node->minor->dev;
1406         struct intel_fbdev *ifbdev = NULL;
1407         struct intel_framebuffer *fb;
1408
1409 #ifdef CONFIG_DRM_I915_FBDEV
1410         struct drm_i915_private *dev_priv = dev->dev_private;
1411         int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1412         if (ret)
1413                 return ret;
1414
1415         ifbdev = dev_priv->fbdev;
1416         fb = to_intel_framebuffer(ifbdev->helper.fb);
1417
1418         seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1419                    fb->base.width,
1420                    fb->base.height,
1421                    fb->base.depth,
1422                    fb->base.bits_per_pixel,
1423                    atomic_read(&fb->base.refcount.refcount));
1424         describe_obj(m, fb->obj);
1425         seq_putc(m, '\n');
1426         mutex_unlock(&dev->mode_config.mutex);
1427 #endif
1428
1429         mutex_lock(&dev->mode_config.fb_lock);
1430         list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
1431                 if (&fb->base == ifbdev->helper.fb)
1432                         continue;
1433
1434                 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1435                            fb->base.width,
1436                            fb->base.height,
1437                            fb->base.depth,
1438                            fb->base.bits_per_pixel,
1439                            atomic_read(&fb->base.refcount.refcount));
1440                 describe_obj(m, fb->obj);
1441                 seq_putc(m, '\n');
1442         }
1443         mutex_unlock(&dev->mode_config.fb_lock);
1444
1445         return 0;
1446 }
1447
/*
 * List the ILK power/render context objects (if allocated) and every HW
 * context on dev_priv->context_list, noting which ring each is the
 * default context for.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        struct i915_hw_context *ctx;
        int ret, i;

        ret = mutex_lock_interruptible(&dev->mode_config.mutex);
        if (ret)
                return ret;

        if (dev_priv->ips.pwrctx) {
                seq_puts(m, "power context ");
                describe_obj(m, dev_priv->ips.pwrctx);
                seq_putc(m, '\n');
        }

        if (dev_priv->ips.renderctx) {
                seq_puts(m, "render context ");
                describe_obj(m, dev_priv->ips.renderctx);
                seq_putc(m, '\n');
        }

        list_for_each_entry(ctx, &dev_priv->context_list, link) {
                seq_puts(m, "HW context ");
                describe_ctx(m, ctx);
                /* A context may be the default for any number of rings. */
                for_each_ring(ring, dev_priv, i)
                        if (ring->default_context == ctx)
                                seq_printf(m, "(default context %s) ", ring->name);

                describe_obj(m, ctx->obj);
                seq_putc(m, '\n');
        }

        mutex_unlock(&dev->mode_config.mutex);

        return 0;
}
1488
1489 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
1490 {
1491         struct drm_info_node *node = (struct drm_info_node *) m->private;
1492         struct drm_device *dev = node->minor->dev;
1493         struct drm_i915_private *dev_priv = dev->dev_private;
1494         unsigned forcewake_count;
1495
1496         spin_lock_irq(&dev_priv->uncore.lock);
1497         forcewake_count = dev_priv->uncore.forcewake_count;
1498         spin_unlock_irq(&dev_priv->uncore.lock);
1499
1500         seq_printf(m, "forcewake count = %u\n", forcewake_count);
1501
1502         return 0;
1503 }
1504
1505 static const char *swizzle_string(unsigned swizzle)
1506 {
1507         switch (swizzle) {
1508         case I915_BIT_6_SWIZZLE_NONE:
1509                 return "none";
1510         case I915_BIT_6_SWIZZLE_9:
1511                 return "bit9";
1512         case I915_BIT_6_SWIZZLE_9_10:
1513                 return "bit9/bit10";
1514         case I915_BIT_6_SWIZZLE_9_11:
1515                 return "bit9/bit11";
1516         case I915_BIT_6_SWIZZLE_9_10_11:
1517                 return "bit9/bit10/bit11";
1518         case I915_BIT_6_SWIZZLE_9_17:
1519                 return "bit9/bit17";
1520         case I915_BIT_6_SWIZZLE_9_10_17:
1521                 return "bit9/bit10/bit17";
1522         case I915_BIT_6_SWIZZLE_UNKNOWN:
1523                 return "unknown";
1524         }
1525
1526         return "bug";
1527 }
1528
/*
 * Report the detected X/Y tiling swizzle modes, plus the raw DRAM-config
 * registers they were derived from (gen-dependent register set).
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
                   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
        seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
                   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

        if (IS_GEN3(dev) || IS_GEN4(dev)) {
                /* gen3/4: DCC and per-channel DRB registers. */
                seq_printf(m, "DDC = 0x%08x\n",
                           I915_READ(DCC));
                seq_printf(m, "C0DRB3 = 0x%04x\n",
                           I915_READ16(C0DRB3));
                seq_printf(m, "C1DRB3 = 0x%04x\n",
                           I915_READ16(C1DRB3));
        } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
                /* gen6/7: MAD DIMM config, tile control and arbiter state. */
                seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
                           I915_READ(MAD_DIMM_C0));
                seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
                           I915_READ(MAD_DIMM_C1));
                seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
                           I915_READ(MAD_DIMM_C2));
                seq_printf(m, "TILECTL = 0x%08x\n",
                           I915_READ(TILECTL));
                seq_printf(m, "ARB_MODE = 0x%08x\n",
                           I915_READ(ARB_MODE));
                seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
                           I915_READ(DISP_ARB_CTL));
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
1570
/*
 * Dump per-process GTT (PPGTT) state: the GFX/ring mode register, each
 * ring's page-directory base/read-back/DCLV registers, the aliasing
 * PPGTT's page-directory offset, and the global ECOCHK setting.
 */
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
        int i, ret;


        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
        /* gen6 has one global GFX_MODE register ... */
        if (INTEL_INFO(dev)->gen == 6)
                seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

        for_each_ring(ring, dev_priv, i) {
                seq_printf(m, "%s\n", ring->name);
                /* ... while gen7 has one per ring. */
                if (INTEL_INFO(dev)->gen == 7)
                        seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
                seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
                seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
                seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
        }
        if (dev_priv->mm.aliasing_ppgtt) {
                struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

                seq_puts(m, "aliasing PPGTT:\n");
                seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
        }
        seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
1605
/*
 * Dump Valleyview DPIO (display PHY) registers for pipe A: PLL dividers,
 * reference, core-clock and loop-filter coefficient registers, plus the
 * fast-clock disable control.  DPIO sideband access is serialised by
 * dpio_lock.  Reports "unsupported" on non-VLV hardware.
 */
static int i915_dpio_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;


        if (!IS_VALLEYVIEW(dev)) {
                seq_puts(m, "unsupported\n");
                return 0;
        }

        ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
        if (ret)
                return ret;

        seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));

        seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
                   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
        seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
                   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));

        seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
                   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
        seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
                   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));

        seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
                   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
        seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
                   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));

        seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
                   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
        seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
                   vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));

        seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
                   vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));

        mutex_unlock(&dev_priv->dpio_lock);

        return 0;
}
1652
1653 static int i915_llc(struct seq_file *m, void *data)
1654 {
1655         struct drm_info_node *node = (struct drm_info_node *) m->private;
1656         struct drm_device *dev = node->minor->dev;
1657         struct drm_i915_private *dev_priv = dev->dev_private;
1658
1659         /* Size calculation for LLC is a bit of a pain. Ignore for now. */
1660         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
1661         seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
1662
1663         return 0;
1664 }
1665
1666 static int i915_edp_psr_status(struct seq_file *m, void *data)
1667 {
1668         struct drm_info_node *node = m->private;
1669         struct drm_device *dev = node->minor->dev;
1670         struct drm_i915_private *dev_priv = dev->dev_private;
1671         u32 psrperf = 0;
1672         bool enabled = false;
1673
1674         seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
1675         seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
1676
1677         enabled = HAS_PSR(dev) &&
1678                 I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1679         seq_printf(m, "Enabled: %s\n", yesno(enabled));
1680
1681         if (HAS_PSR(dev))
1682                 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
1683                         EDP_PSR_PERF_CNT_MASK;
1684         seq_printf(m, "Performance_Counter: %u\n", psrperf);
1685
1686         return 0;
1687 }
1688
/*
 * Report the GPU energy consumption counter, scaled to microjoules.
 * Gen6+ only (-ENODEV otherwise).
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u64 power;
        u32 units;

        if (INTEL_INFO(dev)->gen < 6)
                return -ENODEV;

        rdmsrl(MSR_RAPL_POWER_UNIT, power);
        /* Bits 12:8 hold the energy-status-units exponent (1/2^n J). */
        power = (power & 0x1f00) >> 8;
        units = 1000000 / (1 << power); /* convert to uJ */
        /* NOTE(review): assumes MCH_SECP_NRG_STTS counts in the same
         * RAPL energy units — confirm against the register spec. */
        power = I915_READ(MCH_SECP_NRG_STTS);
        power *= units;

        seq_printf(m, "%llu", (long long unsigned)power);

        return 0;
}
1710
1711 static int i915_pc8_status(struct seq_file *m, void *unused)
1712 {
1713         struct drm_info_node *node = (struct drm_info_node *) m->private;
1714         struct drm_device *dev = node->minor->dev;
1715         struct drm_i915_private *dev_priv = dev->dev_private;
1716
1717         if (!IS_HASWELL(dev)) {
1718                 seq_puts(m, "not supported\n");
1719                 return 0;
1720         }
1721
1722         mutex_lock(&dev_priv->pc8.lock);
1723         seq_printf(m, "Requirements met: %s\n",
1724                    yesno(dev_priv->pc8.requirements_met));
1725         seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
1726         seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
1727         seq_printf(m, "IRQs disabled: %s\n",
1728                    yesno(dev_priv->pc8.irqs_disabled));
1729         seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled));
1730         mutex_unlock(&dev_priv->pc8.lock);
1731
1732         return 0;
1733 }
1734
1735 static int
1736 i915_wedged_get(void *data, u64 *val)
1737 {
1738         struct drm_device *dev = data;
1739         drm_i915_private_t *dev_priv = dev->dev_private;
1740
1741         *val = atomic_read(&dev_priv->gpu_error.reset_counter);
1742
1743         return 0;
1744 }
1745
1746 static int
1747 i915_wedged_set(void *data, u64 val)
1748 {
1749         struct drm_device *dev = data;
1750
1751         DRM_INFO("Manually setting wedged to %llu\n", val);
1752         i915_handle_error(dev, val);
1753
1754         return 0;
1755 }
1756
1757 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
1758                         i915_wedged_get, i915_wedged_set,
1759                         "%llu\n");
1760
1761 static int
1762 i915_ring_stop_get(void *data, u64 *val)
1763 {
1764         struct drm_device *dev = data;
1765         drm_i915_private_t *dev_priv = dev->dev_private;
1766
1767         *val = dev_priv->gpu_error.stop_rings;
1768
1769         return 0;
1770 }
1771
1772 static int
1773 i915_ring_stop_set(void *data, u64 val)
1774 {
1775         struct drm_device *dev = data;
1776         struct drm_i915_private *dev_priv = dev->dev_private;
1777         int ret;
1778
1779         DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
1780
1781         ret = mutex_lock_interruptible(&dev->struct_mutex);
1782         if (ret)
1783                 return ret;
1784
1785         dev_priv->gpu_error.stop_rings = val;
1786         mutex_unlock(&dev->struct_mutex);
1787
1788         return 0;
1789 }
1790
1791 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
1792                         i915_ring_stop_get, i915_ring_stop_set,
1793                         "0x%08llx\n");
1794
1795 static int
1796 i915_ring_missed_irq_get(void *data, u64 *val)
1797 {
1798         struct drm_device *dev = data;
1799         struct drm_i915_private *dev_priv = dev->dev_private;
1800
1801         *val = dev_priv->gpu_error.missed_irq_rings;
1802         return 0;
1803 }
1804
1805 static int
1806 i915_ring_missed_irq_set(void *data, u64 val)
1807 {
1808         struct drm_device *dev = data;
1809         struct drm_i915_private *dev_priv = dev->dev_private;
1810         int ret;
1811
1812         /* Lock against concurrent debugfs callers */
1813         ret = mutex_lock_interruptible(&dev->struct_mutex);
1814         if (ret)
1815                 return ret;
1816         dev_priv->gpu_error.missed_irq_rings = val;
1817         mutex_unlock(&dev->struct_mutex);
1818
1819         return 0;
1820 }
1821
1822 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
1823                         i915_ring_missed_irq_get, i915_ring_missed_irq_set,
1824                         "0x%08llx\n");
1825
1826 static int
1827 i915_ring_test_irq_get(void *data, u64 *val)
1828 {
1829         struct drm_device *dev = data;
1830         struct drm_i915_private *dev_priv = dev->dev_private;
1831
1832         *val = dev_priv->gpu_error.test_irq_rings;
1833
1834         return 0;
1835 }
1836
1837 static int
1838 i915_ring_test_irq_set(void *data, u64 val)
1839 {
1840         struct drm_device *dev = data;
1841         struct drm_i915_private *dev_priv = dev->dev_private;
1842         int ret;
1843
1844         DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
1845
1846         /* Lock against concurrent debugfs callers */
1847         ret = mutex_lock_interruptible(&dev->struct_mutex);
1848         if (ret)
1849                 return ret;
1850
1851         dev_priv->gpu_error.test_irq_rings = val;
1852         mutex_unlock(&dev->struct_mutex);
1853
1854         return 0;
1855 }
1856
1857 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
1858                         i915_ring_test_irq_get, i915_ring_test_irq_set,
1859                         "0x%08llx\n");
1860
/* Flag bits accepted by the i915_gem_drop_caches debugfs file. */
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
                  DROP_BOUND | \
                  DROP_RETIRE | \
                  DROP_ACTIVE)
/* Reading the file reports the full mask of supported DROP_* flags. */
static int
i915_drop_caches_get(void *data, u64 *val)
{
        *val = DROP_ALL;

        return 0;
}
1876
/*
 * Write handler for i915_gem_drop_caches: shed GEM state per the DROP_*
 * mask in @val.  DROP_ACTIVE idles the GPU first, DROP_RETIRE (or
 * DROP_ACTIVE) retires completed requests, DROP_BOUND unbinds unpinned
 * inactive VMAs in every address space, and DROP_UNBOUND releases the
 * backing pages of unbound, unpinned objects.  All under struct_mutex.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
        struct drm_device *dev = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj, *next;
        struct i915_address_space *vm;
        struct i915_vma *vma, *x;
        int ret;

        DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);

        /* No need to check and wait for gpu resets, only libdrm auto-restarts
         * on ioctls on -EAGAIN. */
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        if (val & DROP_ACTIVE) {
                ret = i915_gpu_idle(dev);
                if (ret)
                        goto unlock;
        }

        if (val & (DROP_RETIRE | DROP_ACTIVE))
                i915_gem_retire_requests(dev);

        if (val & DROP_BOUND) {
                list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
                        /* _safe variant: unbinding removes the vma from
                         * the inactive list we are walking. */
                        list_for_each_entry_safe(vma, x, &vm->inactive_list,
                                                 mm_list) {
                                if (vma->obj->pin_count)
                                        continue;

                                ret = i915_vma_unbind(vma);
                                if (ret)
                                        goto unlock;
                        }
                }
        }

        if (val & DROP_UNBOUND) {
                list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
                                         global_list)
                        if (obj->pages_pin_count == 0) {
                                ret = i915_gem_object_put_pages(obj);
                                if (ret)
                                        goto unlock;
                        }
        }

unlock:
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
                        i915_drop_caches_get, i915_drop_caches_set,
                        "0x%08llx\n");
1937
1938 static int
1939 i915_max_freq_get(void *data, u64 *val)
1940 {
1941         struct drm_device *dev = data;
1942         drm_i915_private_t *dev_priv = dev->dev_private;
1943         int ret;
1944
1945         if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1946                 return -ENODEV;
1947
1948         flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1949
1950         ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1951         if (ret)
1952                 return ret;
1953
1954         if (IS_VALLEYVIEW(dev))
1955                 *val = vlv_gpu_freq(dev_priv->mem_freq,
1956                                     dev_priv->rps.max_delay);
1957         else
1958                 *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
1959         mutex_unlock(&dev_priv->rps.hw_lock);
1960
1961         return 0;
1962 }
1963
1964 static int
1965 i915_max_freq_set(void *data, u64 val)
1966 {
1967         struct drm_device *dev = data;
1968         struct drm_i915_private *dev_priv = dev->dev_private;
1969         int ret;
1970
1971         if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1972                 return -ENODEV;
1973
1974         flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1975
1976         DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
1977
1978         ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1979         if (ret)
1980                 return ret;
1981
1982         /*
1983          * Turbo will still be enabled, but won't go above the set value.
1984          */
1985         if (IS_VALLEYVIEW(dev)) {
1986                 val = vlv_freq_opcode(dev_priv->mem_freq, val);
1987                 dev_priv->rps.max_delay = val;
1988                 gen6_set_rps(dev, val);
1989         } else {
1990                 do_div(val, GT_FREQUENCY_MULTIPLIER);
1991                 dev_priv->rps.max_delay = val;
1992                 gen6_set_rps(dev, val);
1993         }
1994
1995         mutex_unlock(&dev_priv->rps.hw_lock);
1996
1997         return 0;
1998 }
1999
2000 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
2001                         i915_max_freq_get, i915_max_freq_set,
2002                         "%llu\n");
2003
2004 static int
2005 i915_min_freq_get(void *data, u64 *val)
2006 {
2007         struct drm_device *dev = data;
2008         drm_i915_private_t *dev_priv = dev->dev_private;
2009         int ret;
2010
2011         if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2012                 return -ENODEV;
2013
2014         flush_delayed_work(&dev_priv->rps.delayed_resume_work);
2015
2016         ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
2017         if (ret)
2018                 return ret;
2019
2020         if (IS_VALLEYVIEW(dev))
2021                 *val = vlv_gpu_freq(dev_priv->mem_freq,
2022                                     dev_priv->rps.min_delay);
2023         else
2024                 *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
2025         mutex_unlock(&dev_priv->rps.hw_lock);
2026
2027         return 0;
2028 }
2029
2030 static int
2031 i915_min_freq_set(void *data, u64 val)
2032 {
2033         struct drm_device *dev = data;
2034         struct drm_i915_private *dev_priv = dev->dev_private;
2035         int ret;
2036
2037         if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2038                 return -ENODEV;
2039
2040         flush_delayed_work(&dev_priv->rps.delayed_resume_work);
2041
2042         DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
2043
2044         ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
2045         if (ret)
2046                 return ret;
2047
2048         /*
2049          * Turbo will still be enabled, but won't go below the set value.
2050          */
2051         if (IS_VALLEYVIEW(dev)) {
2052                 val = vlv_freq_opcode(dev_priv->mem_freq, val);
2053                 dev_priv->rps.min_delay = val;
2054                 valleyview_set_rps(dev, val);
2055         } else {
2056                 do_div(val, GT_FREQUENCY_MULTIPLIER);
2057                 dev_priv->rps.min_delay = val;
2058                 gen6_set_rps(dev, val);
2059         }
2060         mutex_unlock(&dev_priv->rps.hw_lock);
2061
2062         return 0;
2063 }
2064
2065 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
2066                         i915_min_freq_get, i915_min_freq_set,
2067                         "%llu\n");
2068
2069 static int
2070 i915_cache_sharing_get(void *data, u64 *val)
2071 {
2072         struct drm_device *dev = data;
2073         drm_i915_private_t *dev_priv = dev->dev_private;
2074         u32 snpcr;
2075         int ret;
2076
2077         if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2078                 return -ENODEV;
2079
2080         ret = mutex_lock_interruptible(&dev->struct_mutex);
2081         if (ret)
2082                 return ret;
2083
2084         snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
2085         mutex_unlock(&dev_priv->dev->struct_mutex);
2086
2087         *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
2088
2089         return 0;
2090 }
2091
2092 static int
2093 i915_cache_sharing_set(void *data, u64 val)
2094 {
2095         struct drm_device *dev = data;
2096         struct drm_i915_private *dev_priv = dev->dev_private;
2097         u32 snpcr;
2098
2099         if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2100                 return -ENODEV;
2101
2102         if (val > 3)
2103                 return -EINVAL;
2104
2105         DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
2106
2107         /* Update the cache sharing policy here as well */
2108         snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
2109         snpcr &= ~GEN6_MBC_SNPCR_MASK;
2110         snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
2111         I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
2112
2113         return 0;
2114 }
2115
2116 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
2117                         i915_cache_sharing_get, i915_cache_sharing_set,
2118                         "%llu\n");
2119
2120 /* As the drm_debugfs_init() routines are called before dev->dev_private is
2121  * allocated we need to hook into the minor for release. */
2122 static int
2123 drm_add_fake_info_node(struct drm_minor *minor,
2124                        struct dentry *ent,
2125                        const void *key)
2126 {
2127         struct drm_info_node *node;
2128
2129         node = kmalloc(sizeof(*node), GFP_KERNEL);
2130         if (node == NULL) {
2131                 debugfs_remove(ent);
2132                 return -ENOMEM;
2133         }
2134
2135         node->minor = minor;
2136         node->dent = ent;
2137         node->info_ent = (void *) key;
2138
2139         mutex_lock(&minor->debugfs_lock);
2140         list_add(&node->list, &minor->debugfs_list);
2141         mutex_unlock(&minor->debugfs_lock);
2142
2143         return 0;
2144 }
2145
2146 static int i915_forcewake_open(struct inode *inode, struct file *file)
2147 {
2148         struct drm_device *dev = inode->i_private;
2149         struct drm_i915_private *dev_priv = dev->dev_private;
2150
2151         if (INTEL_INFO(dev)->gen < 6)
2152                 return 0;
2153
2154         gen6_gt_force_wake_get(dev_priv);
2155
2156         return 0;
2157 }
2158
2159 static int i915_forcewake_release(struct inode *inode, struct file *file)
2160 {
2161         struct drm_device *dev = inode->i_private;
2162         struct drm_i915_private *dev_priv = dev->dev_private;
2163
2164         if (INTEL_INFO(dev)->gen < 6)
2165                 return 0;
2166
2167         gen6_gt_force_wake_put(dev_priv);
2168
2169         return 0;
2170 }
2171
/* Holding i915_forcewake_user open keeps the GT awake via forcewake. */
static const struct file_operations i915_forcewake_fops = {
        .owner = THIS_MODULE,
        .open = i915_forcewake_open,
        .release = i915_forcewake_release,
};
2177
2178 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
2179 {
2180         struct drm_device *dev = minor->dev;
2181         struct dentry *ent;
2182
2183         ent = debugfs_create_file("i915_forcewake_user",
2184                                   S_IRUSR,
2185                                   root, dev,
2186                                   &i915_forcewake_fops);
2187         if (IS_ERR(ent))
2188                 return PTR_ERR(ent);
2189
2190         return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
2191 }
2192
2193 static int i915_debugfs_create(struct dentry *root,
2194                                struct drm_minor *minor,
2195                                const char *name,
2196                                const struct file_operations *fops)
2197 {
2198         struct drm_device *dev = minor->dev;
2199         struct dentry *ent;
2200
2201         ent = debugfs_create_file(name,
2202                                   S_IRUGO | S_IWUSR,
2203                                   root, dev,
2204                                   fops);
2205         if (IS_ERR(ent))
2206                 return PTR_ERR(ent);
2207
2208         return drm_add_fake_info_node(minor, ent, fops);
2209 }
2210
/*
 * Read-only seq_file entries, registered in bulk via
 * drm_debugfs_create_files().  Where one show function serves several
 * files, the trailing pointer selects the variant (passed as node data).
 */
static struct drm_info_list i915_debugfs_list[] = {
        {"i915_capabilities", i915_capabilities, 0},
        {"i915_gem_objects", i915_gem_object_info, 0},
        {"i915_gem_gtt", i915_gem_gtt_info, 0},
        {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
        {"i915_gem_stolen", i915_gem_stolen_list_info },
        {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
        {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
        {"i915_gem_interrupt", i915_interrupt_info, 0},
        {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
        {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
        {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
        {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
        {"i915_rstdby_delays", i915_rstdby_delays, 0},
        {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
        {"i915_delayfreq_table", i915_delayfreq_table, 0},
        {"i915_inttoext_table", i915_inttoext_table, 0},
        {"i915_drpc_info", i915_drpc_info, 0},
        {"i915_emon_status", i915_emon_status, 0},
        {"i915_ring_freq_table", i915_ring_freq_table, 0},
        {"i915_gfxec", i915_gfxec, 0},
        {"i915_fbc_status", i915_fbc_status, 0},
        {"i915_ips_status", i915_ips_status, 0},
        {"i915_sr_status", i915_sr_status, 0},
        {"i915_opregion", i915_opregion, 0},
        {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
        {"i915_context_status", i915_context_status, 0},
        {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
        {"i915_swizzle_info", i915_swizzle_info, 0},
        {"i915_ppgtt_info", i915_ppgtt_info, 0},
        {"i915_dpio", i915_dpio_info, 0},
        {"i915_llc", i915_llc, 0},
        {"i915_edp_psr_status", i915_edp_psr_status, 0},
        {"i915_energy_uJ", i915_energy_uJ, 0},
        {"i915_pc8_status", i915_pc8_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2252
/*
 * Writable debugfs control files with dedicated file_operations.  Each
 * is created individually by i915_debugfs_init() and keyed by its fops
 * pointer for removal in i915_debugfs_cleanup().
 */
static struct i915_debugfs_files {
        const char *name;
        const struct file_operations *fops;
} i915_debugfs_files[] = {
        {"i915_wedged", &i915_wedged_fops},
        {"i915_max_freq", &i915_max_freq_fops},
        {"i915_min_freq", &i915_min_freq_fops},
        {"i915_cache_sharing", &i915_cache_sharing_fops},
        {"i915_ring_stop", &i915_ring_stop_fops},
        {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
        {"i915_ring_test_irq", &i915_ring_test_irq_fops},
        {"i915_gem_drop_caches", &i915_drop_caches_fops},
        {"i915_error_state", &i915_error_state_fops},
        {"i915_next_seqno", &i915_next_seqno_fops},
};
2268
2269 int i915_debugfs_init(struct drm_minor *minor)
2270 {
2271         int ret, i;
2272
2273         ret = i915_forcewake_create(minor->debugfs_root, minor);
2274         if (ret)
2275                 return ret;
2276
2277         for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
2278                 ret = i915_debugfs_create(minor->debugfs_root, minor,
2279                                           i915_debugfs_files[i].name,
2280                                           i915_debugfs_files[i].fops);
2281                 if (ret)
2282                         return ret;
2283         }
2284
2285         return drm_debugfs_create_files(i915_debugfs_list,
2286                                         I915_DEBUGFS_ENTRIES,
2287                                         minor->debugfs_root, minor);
2288 }
2289
/*
 * Remove all i915 debugfs entries for @minor.  Files registered through
 * drm_add_fake_info_node() are keyed by their fops pointer, hence the
 * drm_info_list casts — the pointer value only has to match the key
 * used at creation time, it is never dereferenced as an info list.
 */
void i915_debugfs_cleanup(struct drm_minor *minor)
{
        int i;

        drm_debugfs_remove_files(i915_debugfs_list,
                                 I915_DEBUGFS_ENTRIES, minor);
        /* The forcewake file's fake node was keyed by its fops. */
        drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
                                 1, minor);
        for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
                struct drm_info_list *info_list =
                        (struct drm_info_list *) i915_debugfs_files[i].fops;

                drm_debugfs_remove_files(info_list, 1, minor);
        }
}
2305
2306 #endif /* CONFIG_DEBUG_FS */