1 /*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 *    Eric Anholt <eric@anholt.net>
25 *    Keith Packard <keithp@keithp.com>
26 *
27 */
29 #include <linux/seq_file.h>
30 #include <linux/circ_buf.h>
31 #include <linux/ctype.h>
32 #include <linux/debugfs.h>
33 #include <linux/slab.h>
34 #include <linux/export.h>
35 #include <linux/list_sort.h>
36 #include <asm/msr-index.h>
37 #include <drm/drmP.h>
38 #include "intel_drv.h"
39 #include "intel_ringbuffer.h"
40 #include <drm/i915_drm.h>
41 #include "i915_drv.h"
43 #if defined(CONFIG_DEBUG_FS)
51 static const char *yesno(int v)
53 return v ? "yes" : "no";
56 /* As the drm_debugfs_init() routines are called before dev->dev_private is
57  * allocated, we need to hook into the minor for release. */
59 drm_add_fake_info_node(struct drm_minor *minor,
63 struct drm_info_node *node;
65 node = kmalloc(sizeof(*node), GFP_KERNEL);
73 node->info_ent = (void *) key;
75 mutex_lock(&minor->debugfs_lock);
76 list_add(&node->list, &minor->debugfs_list);
77 mutex_unlock(&minor->debugfs_lock);
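/* i915_capabilities: dump the device generation, PCH type and every
 * device-info feature flag reported by DEV_INFO_FOR_EACH_FLAG. */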
82 static int i915_capabilities(struct seq_file *m, void *data)
84 struct drm_info_node *node = (struct drm_info_node *) m->private;
85 struct drm_device *dev = node->minor->dev;
86 const struct intel_device_info *info = INTEL_INFO(dev);
88 seq_printf(m, "gen: %d\n", info->gen);
89 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
90 #define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
91 #define SEP_SEMICOLON ;
92 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
99 static const char *get_pin_flag(struct drm_i915_gem_object *obj)
101 if (obj->user_pin_count > 0)
103 else if (obj->pin_count > 0)
109 static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
111 switch (obj->tiling_mode) {
113 case I915_TILING_NONE: return " ";
114 case I915_TILING_X: return "X";
115 case I915_TILING_Y: return "Y";
119 static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
121 return obj->has_global_gtt_mapping ? "g" : " ";
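/* describe_obj: one-line summary of a GEM object - pin/tiling/GGTT flags,
 * size, read/write domains, last seqnos, cache level, per-VMA offsets and
 * the ring the object was last used on. */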
125 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
127 struct i915_vma *vma;
128 seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
131 get_tiling_flag(obj),
132 get_global_flag(obj),
133 obj->base.size / 1024,
134 obj->base.read_domains,
135 obj->base.write_domain,
136 obj->last_read_seqno,
137 obj->last_write_seqno,
138 obj->last_fenced_seqno,
139 i915_cache_level_str(obj->cache_level),
140 obj->dirty ? " dirty" : "",
141 obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
143 seq_printf(m, " (name: %d)", obj->base.name);
145 seq_printf(m, " (pinned x %d)", obj->pin_count);
146 if (obj->pin_display)
147 seq_printf(m, " (display)");
148 if (obj->fence_reg != I915_FENCE_REG_NONE)
149 seq_printf(m, " (fence: %d)", obj->fence_reg);
150 list_for_each_entry(vma, &obj->vma_list, vma_link) {
151 if (!i915_is_ggtt(vma->vm))
155 seq_printf(m, "gtt offset: %08lx, size: %08lx)",
156 vma->node.start, vma->node.size);
159 seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
160 if (obj->pin_mappable || obj->fault_mappable) {
162 if (obj->pin_mappable)
164 if (obj->fault_mappable)
167 seq_printf(m, " (%s mappable)", s);
169 if (obj->ring != NULL)
170 seq_printf(m, " (%s)", obj->ring->name);
173 static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
175 seq_putc(m, ctx->is_initialized ? 'I' : 'i');
176 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
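/* Walk the GGTT active or inactive list (selected through info_ent->data)
 * and describe every VMA on it, followed by object/GTT size totals. */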
180 static int i915_gem_object_list_info(struct seq_file *m, void *data)
182 struct drm_info_node *node = (struct drm_info_node *) m->private;
183 uintptr_t list = (uintptr_t) node->info_ent->data;
184 struct list_head *head;
185 struct drm_device *dev = node->minor->dev;
186 struct drm_i915_private *dev_priv = dev->dev_private;
187 struct i915_address_space *vm = &dev_priv->gtt.base;
188 struct i915_vma *vma;
189 size_t total_obj_size, total_gtt_size;
192 ret = mutex_lock_interruptible(&dev->struct_mutex);
196 /* FIXME: the user of this interface might want more than just GGTT */
199 seq_puts(m, "Active:\n");
200 head = &vm->active_list;
203 seq_puts(m, "Inactive:\n");
204 head = &vm->inactive_list;
207 mutex_unlock(&dev->struct_mutex);
211 total_obj_size = total_gtt_size = count = 0;
212 list_for_each_entry(vma, head, mm_list) {
214 describe_obj(m, vma->obj);
216 total_obj_size += vma->obj->base.size;
217 total_gtt_size += vma->node.size;
220 mutex_unlock(&dev->struct_mutex);
222 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
223 count, total_obj_size, total_gtt_size);
227 static int obj_rank_by_stolen(void *priv,
228 struct list_head *A, struct list_head *B)
230 struct drm_i915_gem_object *a =
231 container_of(A, struct drm_i915_gem_object, obj_exec_link);
232 struct drm_i915_gem_object *b =
233 container_of(B, struct drm_i915_gem_object, obj_exec_link);
235 return a->stolen->start - b->stolen->start;
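/* Collect every object backed by stolen memory from the bound and unbound
 * lists, sort them by stolen offset and dump them with size totals. */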
238 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
240 struct drm_info_node *node = (struct drm_info_node *) m->private;
241 struct drm_device *dev = node->minor->dev;
242 struct drm_i915_private *dev_priv = dev->dev_private;
243 struct drm_i915_gem_object *obj;
244 size_t total_obj_size, total_gtt_size;
248 ret = mutex_lock_interruptible(&dev->struct_mutex);
252 total_obj_size = total_gtt_size = count = 0;
253 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
254 if (obj->stolen == NULL)
257 list_add(&obj->obj_exec_link, &stolen);
259 total_obj_size += obj->base.size;
260 total_gtt_size += i915_gem_obj_ggtt_size(obj);
263 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
264 if (obj->stolen == NULL)
267 list_add(&obj->obj_exec_link, &stolen);
269 total_obj_size += obj->base.size;
272 list_sort(NULL, &stolen, obj_rank_by_stolen);
273 seq_puts(m, "Stolen:\n");
274 while (!list_empty(&stolen)) {
275 obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
277 describe_obj(m, obj);
279 list_del_init(&obj->obj_exec_link);
281 mutex_unlock(&dev->struct_mutex);
283 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
284 count, total_obj_size, total_gtt_size);
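/* Helpers for i915_gem_object_info: count_objects()/count_vmas() accumulate
 * counts and (mappable) GTT sizes, while per_file_stats() tallies per-client
 * totals over a file's object idr. */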
288 #define count_objects(list, member) do { \
289 list_for_each_entry(obj, list, member) { \
290 size += i915_gem_obj_ggtt_size(obj); \
292 if (obj->map_and_fenceable) { \
293 mappable_size += i915_gem_obj_ggtt_size(obj); \
301 size_t total, active, inactive, unbound;
304 static int per_file_stats(int id, void *ptr, void *data)
306 struct drm_i915_gem_object *obj = ptr;
307 struct file_stats *stats = data;
310 stats->total += obj->base.size;
312 if (i915_gem_obj_ggtt_bound(obj)) {
313 if (!list_empty(&obj->ring_list))
314 stats->active += obj->base.size;
316 stats->inactive += obj->base.size;
318 if (!list_empty(&obj->global_list))
319 stats->unbound += obj->base.size;
325 #define count_vmas(list, member) do { \
326 list_for_each_entry(vma, list, member) { \
327 size += i915_gem_obj_ggtt_size(vma->obj); \
329 if (vma->obj->map_and_fenceable) { \
330 mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
336 static int i915_gem_object_info(struct seq_file *m, void* data)
338 struct drm_info_node *node = (struct drm_info_node *) m->private;
339 struct drm_device *dev = node->minor->dev;
340 struct drm_i915_private *dev_priv = dev->dev_private;
341 u32 count, mappable_count, purgeable_count;
342 size_t size, mappable_size, purgeable_size;
343 struct drm_i915_gem_object *obj;
344 struct i915_address_space *vm = &dev_priv->gtt.base;
345 struct drm_file *file;
346 struct i915_vma *vma;
349 ret = mutex_lock_interruptible(&dev->struct_mutex);
353 seq_printf(m, "%u objects, %zu bytes\n",
354 dev_priv->mm.object_count,
355 dev_priv->mm.object_memory);
357 size = count = mappable_size = mappable_count = 0;
358 count_objects(&dev_priv->mm.bound_list, global_list);
359 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
360 count, mappable_count, size, mappable_size);
362 size = count = mappable_size = mappable_count = 0;
363 count_vmas(&vm->active_list, mm_list);
364 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
365 count, mappable_count, size, mappable_size);
367 size = count = mappable_size = mappable_count = 0;
368 count_vmas(&vm->inactive_list, mm_list);
369 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
370 count, mappable_count, size, mappable_size);
372 size = count = purgeable_size = purgeable_count = 0;
373 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
374 size += obj->base.size, ++count;
375 if (obj->madv == I915_MADV_DONTNEED)
376 purgeable_size += obj->base.size, ++purgeable_count;
378 seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
380 size = count = mappable_size = mappable_count = 0;
381 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
382 if (obj->fault_mappable) {
383 size += i915_gem_obj_ggtt_size(obj);
386 if (obj->pin_mappable) {
387 mappable_size += i915_gem_obj_ggtt_size(obj);
390 if (obj->madv == I915_MADV_DONTNEED) {
391 purgeable_size += obj->base.size;
395 seq_printf(m, "%u purgeable objects, %zu bytes\n",
396 purgeable_count, purgeable_size);
397 seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
398 mappable_count, mappable_size);
399 seq_printf(m, "%u fault mappable objects, %zu bytes\n",
402 seq_printf(m, "%zu [%lu] gtt total\n",
403 dev_priv->gtt.base.total,
404 dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
407 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
408 struct file_stats stats;
410 memset(&stats, 0, sizeof(stats));
411 idr_for_each(&file->object_idr, per_file_stats, &stats);
412 seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
413 get_pid_task(file->pid, PIDTYPE_PID)->comm,
421 mutex_unlock(&dev->struct_mutex);
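/* List every bound object, or only the pinned ones when opened through the
 * PINNED_LIST entry, together with size totals. */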
426 static int i915_gem_gtt_info(struct seq_file *m, void *data)
428 struct drm_info_node *node = (struct drm_info_node *) m->private;
429 struct drm_device *dev = node->minor->dev;
430 uintptr_t list = (uintptr_t) node->info_ent->data;
431 struct drm_i915_private *dev_priv = dev->dev_private;
432 struct drm_i915_gem_object *obj;
433 size_t total_obj_size, total_gtt_size;
436 ret = mutex_lock_interruptible(&dev->struct_mutex);
440 total_obj_size = total_gtt_size = count = 0;
441 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
442 if (list == PINNED_LIST && obj->pin_count == 0)
446 describe_obj(m, obj);
448 total_obj_size += obj->base.size;
449 total_gtt_size += i915_gem_obj_ggtt_size(obj);
453 mutex_unlock(&dev->struct_mutex);
455 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
456 count, total_obj_size, total_gtt_size);
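/* Report per-CRTC page-flip state: whether a flip is queued or pending,
 * stall-check status and the GTT offsets of the old and new framebuffers. */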
461 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
463 struct drm_info_node *node = (struct drm_info_node *) m->private;
464 struct drm_device *dev = node->minor->dev;
466 struct intel_crtc *crtc;
468 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
469 const char pipe = pipe_name(crtc->pipe);
470 const char plane = plane_name(crtc->plane);
471 struct intel_unpin_work *work;
473 spin_lock_irqsave(&dev->event_lock, flags);
474 work = crtc->unpin_work;
476 seq_printf(m, "No flip due on pipe %c (plane %c)\n",
479 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
480 seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
483 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
486 if (work->enable_stall_check)
487 seq_puts(m, "Stall check enabled, ");
489 seq_puts(m, "Stall check waiting for page flip ioctl, ");
490 seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
492 if (work->old_fb_obj) {
493 struct drm_i915_gem_object *obj = work->old_fb_obj;
495 seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
496 i915_gem_obj_ggtt_offset(obj));
498 if (work->pending_flip_obj) {
499 struct drm_i915_gem_object *obj = work->pending_flip_obj;
501 seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
502 i915_gem_obj_ggtt_offset(obj));
505 spin_unlock_irqrestore(&dev->event_lock, flags);
511 static int i915_gem_request_info(struct seq_file *m, void *data)
513 struct drm_info_node *node = (struct drm_info_node *) m->private;
514 struct drm_device *dev = node->minor->dev;
515 drm_i915_private_t *dev_priv = dev->dev_private;
516 struct intel_ring_buffer *ring;
517 struct drm_i915_gem_request *gem_request;
520 ret = mutex_lock_interruptible(&dev->struct_mutex);
525 for_each_ring(ring, dev_priv, i) {
526 if (list_empty(&ring->request_list))
529 seq_printf(m, "%s requests:\n", ring->name);
530 list_for_each_entry(gem_request,
533 seq_printf(m, " %d @ %d\n",
535 (int) (jiffies - gem_request->emitted_jiffies));
539 mutex_unlock(&dev->struct_mutex);
542 seq_puts(m, "No requests\n");
547 static void i915_ring_seqno_info(struct seq_file *m,
548 struct intel_ring_buffer *ring)
550 if (ring->get_seqno) {
551 seq_printf(m, "Current sequence (%s): %u\n",
552 ring->name, ring->get_seqno(ring, false));
556 static int i915_gem_seqno_info(struct seq_file *m, void *data)
558 struct drm_info_node *node = (struct drm_info_node *) m->private;
559 struct drm_device *dev = node->minor->dev;
560 drm_i915_private_t *dev_priv = dev->dev_private;
561 struct intel_ring_buffer *ring;
564 ret = mutex_lock_interruptible(&dev->struct_mutex);
568 for_each_ring(ring, dev_priv, i)
569 i915_ring_seqno_info(m, ring);
571 mutex_unlock(&dev->struct_mutex);
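/* Dump the interrupt registers (IMR/IIR/IER and friends) for the platform at
 * hand: gen8, Valleyview, pre-PCH-split or PCH-split, plus the per-ring IMR
 * and current seqnos. */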
577 static int i915_interrupt_info(struct seq_file *m, void *data)
579 struct drm_info_node *node = (struct drm_info_node *) m->private;
580 struct drm_device *dev = node->minor->dev;
581 drm_i915_private_t *dev_priv = dev->dev_private;
582 struct intel_ring_buffer *ring;
585 ret = mutex_lock_interruptible(&dev->struct_mutex);
589 if (INTEL_INFO(dev)->gen >= 8) {
591 seq_printf(m, "Master Interrupt Control:\t%08x\n",
592 I915_READ(GEN8_MASTER_IRQ));
594 for (i = 0; i < 4; i++) {
595 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
596 i, I915_READ(GEN8_GT_IMR(i)));
597 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
598 i, I915_READ(GEN8_GT_IIR(i)));
599 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
600 i, I915_READ(GEN8_GT_IER(i)));
604 seq_printf(m, "Pipe %c IMR:\t%08x\n",
606 I915_READ(GEN8_DE_PIPE_IMR(i)));
607 seq_printf(m, "Pipe %c IIR:\t%08x\n",
609 I915_READ(GEN8_DE_PIPE_IIR(i)));
610 seq_printf(m, "Pipe %c IER:\t%08x\n",
612 I915_READ(GEN8_DE_PIPE_IER(i)));
615 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
616 I915_READ(GEN8_DE_PORT_IMR));
617 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
618 I915_READ(GEN8_DE_PORT_IIR));
619 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
620 I915_READ(GEN8_DE_PORT_IER));
622 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
623 I915_READ(GEN8_DE_MISC_IMR));
624 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
625 I915_READ(GEN8_DE_MISC_IIR));
626 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
627 I915_READ(GEN8_DE_MISC_IER));
629 seq_printf(m, "PCU interrupt mask:\t%08x\n",
630 I915_READ(GEN8_PCU_IMR));
631 seq_printf(m, "PCU interrupt identity:\t%08x\n",
632 I915_READ(GEN8_PCU_IIR));
633 seq_printf(m, "PCU interrupt enable:\t%08x\n",
634 I915_READ(GEN8_PCU_IER));
635 } else if (IS_VALLEYVIEW(dev)) {
636 seq_printf(m, "Display IER:\t%08x\n",
638 seq_printf(m, "Display IIR:\t%08x\n",
640 seq_printf(m, "Display IIR_RW:\t%08x\n",
641 I915_READ(VLV_IIR_RW));
642 seq_printf(m, "Display IMR:\t%08x\n",
645 seq_printf(m, "Pipe %c stat:\t%08x\n",
647 I915_READ(PIPESTAT(pipe)));
649 seq_printf(m, "Master IER:\t%08x\n",
650 I915_READ(VLV_MASTER_IER));
652 seq_printf(m, "Render IER:\t%08x\n",
654 seq_printf(m, "Render IIR:\t%08x\n",
656 seq_printf(m, "Render IMR:\t%08x\n",
659 seq_printf(m, "PM IER:\t\t%08x\n",
660 I915_READ(GEN6_PMIER));
661 seq_printf(m, "PM IIR:\t\t%08x\n",
662 I915_READ(GEN6_PMIIR));
663 seq_printf(m, "PM IMR:\t\t%08x\n",
664 I915_READ(GEN6_PMIMR));
666 seq_printf(m, "Port hotplug:\t%08x\n",
667 I915_READ(PORT_HOTPLUG_EN));
668 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
669 I915_READ(VLV_DPFLIPSTAT));
670 seq_printf(m, "DPINVGTT:\t%08x\n",
671 I915_READ(DPINVGTT));
673 } else if (!HAS_PCH_SPLIT(dev)) {
674 seq_printf(m, "Interrupt enable: %08x\n",
676 seq_printf(m, "Interrupt identity: %08x\n",
678 seq_printf(m, "Interrupt mask: %08x\n",
681 seq_printf(m, "Pipe %c stat: %08x\n",
683 I915_READ(PIPESTAT(pipe)));
685 seq_printf(m, "North Display Interrupt enable: %08x\n",
687 seq_printf(m, "North Display Interrupt identity: %08x\n",
689 seq_printf(m, "North Display Interrupt mask: %08x\n",
691 seq_printf(m, "South Display Interrupt enable: %08x\n",
693 seq_printf(m, "South Display Interrupt identity: %08x\n",
695 seq_printf(m, "South Display Interrupt mask: %08x\n",
697 seq_printf(m, "Graphics Interrupt enable: %08x\n",
699 seq_printf(m, "Graphics Interrupt identity: %08x\n",
701 seq_printf(m, "Graphics Interrupt mask: %08x\n",
704 seq_printf(m, "Interrupts received: %d\n",
705 atomic_read(&dev_priv->irq_received));
706 for_each_ring(ring, dev_priv, i) {
707 if (INTEL_INFO(dev)->gen >= 6) {
709 "Graphics Interrupt mask (%s): %08x\n",
710 ring->name, I915_READ_IMR(ring));
712 i915_ring_seqno_info(m, ring);
714 mutex_unlock(&dev->struct_mutex);
719 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
721 struct drm_info_node *node = (struct drm_info_node *) m->private;
722 struct drm_device *dev = node->minor->dev;
723 drm_i915_private_t *dev_priv = dev->dev_private;
726 ret = mutex_lock_interruptible(&dev->struct_mutex);
730 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
731 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
732 for (i = 0; i < dev_priv->num_fence_regs; i++) {
733 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
735 seq_printf(m, "Fence %d, pin count = %d, object = ",
736 i, dev_priv->fence_regs[i].pin_count);
738 seq_puts(m, "unused");
740 describe_obj(m, obj);
744 mutex_unlock(&dev->struct_mutex);
748 static int i915_hws_info(struct seq_file *m, void *data)
750 struct drm_info_node *node = (struct drm_info_node *) m->private;
751 struct drm_device *dev = node->minor->dev;
752 drm_i915_private_t *dev_priv = dev->dev_private;
753 struct intel_ring_buffer *ring;
757 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
758 hws = ring->status_page.page_addr;
762 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
763 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
765 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
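/* i915_error_state: reading returns the last captured GPU error state as
 * text, writing anything to the file clears the captured state. */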
771 i915_error_state_write(struct file *filp,
772 const char __user *ubuf,
776 struct i915_error_state_file_priv *error_priv = filp->private_data;
777 struct drm_device *dev = error_priv->dev;
780 DRM_DEBUG_DRIVER("Resetting error state\n");
782 ret = mutex_lock_interruptible(&dev->struct_mutex);
786 i915_destroy_error_state(dev);
787 mutex_unlock(&dev->struct_mutex);
792 static int i915_error_state_open(struct inode *inode, struct file *file)
794 struct drm_device *dev = inode->i_private;
795 struct i915_error_state_file_priv *error_priv;
797 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
801 error_priv->dev = dev;
803 i915_error_state_get(dev, error_priv);
805 file->private_data = error_priv;
810 static int i915_error_state_release(struct inode *inode, struct file *file)
812 struct i915_error_state_file_priv *error_priv = file->private_data;
814 i915_error_state_put(error_priv);
820 static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
821 size_t count, loff_t *pos)
823 struct i915_error_state_file_priv *error_priv = file->private_data;
824 struct drm_i915_error_state_buf error_str;
826 ssize_t ret_count = 0;
829 ret = i915_error_state_buf_init(&error_str, count, *pos);
833 ret = i915_error_state_to_str(&error_str, error_priv);
837 ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
844 *pos = error_str.start + ret_count;
846 i915_error_state_buf_release(&error_str);
847 return ret ?: ret_count;
850 static const struct file_operations i915_error_state_fops = {
851 .owner = THIS_MODULE,
852 .open = i915_error_state_open,
853 .read = i915_error_state_read,
854 .write = i915_error_state_write,
855 .llseek = default_llseek,
856 .release = i915_error_state_release,
860 i915_next_seqno_get(void *data, u64 *val)
862 struct drm_device *dev = data;
863 drm_i915_private_t *dev_priv = dev->dev_private;
866 ret = mutex_lock_interruptible(&dev->struct_mutex);
870 *val = dev_priv->next_seqno;
871 mutex_unlock(&dev->struct_mutex);
877 i915_next_seqno_set(void *data, u64 val)
879 struct drm_device *dev = data;
882 ret = mutex_lock_interruptible(&dev->struct_mutex);
886 ret = i915_gem_set_seqno(dev, val);
887 mutex_unlock(&dev->struct_mutex);
892 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
893 i915_next_seqno_get, i915_next_seqno_set,
896 static int i915_rstdby_delays(struct seq_file *m, void *unused)
898 struct drm_info_node *node = (struct drm_info_node *) m->private;
899 struct drm_device *dev = node->minor->dev;
900 drm_i915_private_t *dev_priv = dev->dev_private;
904 ret = mutex_lock_interruptible(&dev->struct_mutex);
908 crstanddelay = I915_READ16(CRSTANDVID);
910 mutex_unlock(&dev->struct_mutex);
912 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
917 static int i915_cur_delayinfo(struct seq_file *m, void *unused)
919 struct drm_info_node *node = (struct drm_info_node *) m->private;
920 struct drm_device *dev = node->minor->dev;
921 drm_i915_private_t *dev_priv = dev->dev_private;
924 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
927 u16 rgvswctl = I915_READ16(MEMSWCTL);
928 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
930 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
931 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
932 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
934 seq_printf(m, "Current P-state: %d\n",
935 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
936 } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
937 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
938 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
939 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
940 u32 rpstat, cagf, reqf;
941 u32 rpupei, rpcurup, rpprevup;
942 u32 rpdownei, rpcurdown, rpprevdown;
945 /* RPSTAT1 is in the GT power well */
946 ret = mutex_lock_interruptible(&dev->struct_mutex);
950 gen6_gt_force_wake_get(dev_priv);
952 reqf = I915_READ(GEN6_RPNSWREQ);
953 reqf &= ~GEN6_TURBO_DISABLE;
958 reqf *= GT_FREQUENCY_MULTIPLIER;
960 rpstat = I915_READ(GEN6_RPSTAT1);
961 rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
962 rpcurup = I915_READ(GEN6_RP_CUR_UP);
963 rpprevup = I915_READ(GEN6_RP_PREV_UP);
964 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
965 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
966 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
968 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
970 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
971 cagf *= GT_FREQUENCY_MULTIPLIER;
973 gen6_gt_force_wake_put(dev_priv);
974 mutex_unlock(&dev->struct_mutex);
976 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
977 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
978 seq_printf(m, "Render p-state ratio: %d\n",
979 (gt_perf_status & 0xff00) >> 8);
980 seq_printf(m, "Render p-state VID: %d\n",
981 gt_perf_status & 0xff);
982 seq_printf(m, "Render p-state limit: %d\n",
983 rp_state_limits & 0xff);
984 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
985 seq_printf(m, "CAGF: %dMHz\n", cagf);
986 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
988 seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
989 GEN6_CURBSYTAVG_MASK);
990 seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
991 GEN6_CURBSYTAVG_MASK);
992 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
994 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
995 GEN6_CURBSYTAVG_MASK);
996 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
997 GEN6_CURBSYTAVG_MASK);
999 max_freq = (rp_state_cap & 0xff0000) >> 16;
1000 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1001 max_freq * GT_FREQUENCY_MULTIPLIER);
1003 max_freq = (rp_state_cap & 0xff00) >> 8;
1004 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1005 max_freq * GT_FREQUENCY_MULTIPLIER);
1007 max_freq = rp_state_cap & 0xff;
1008 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1009 max_freq * GT_FREQUENCY_MULTIPLIER);
1011 seq_printf(m, "Max overclocked frequency: %dMHz\n",
1012 dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
1013 } else if (IS_VALLEYVIEW(dev)) {
1016 mutex_lock(&dev_priv->rps.hw_lock);
1017 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1018 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1019 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1021 val = vlv_punit_read(dev_priv, PUNIT_FUSE_BUS1);
1022 seq_printf(m, "max GPU freq: %d MHz\n",
1023 vlv_gpu_freq(dev_priv->mem_freq, val));
1025 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM);
1026 seq_printf(m, "min GPU freq: %d MHz\n",
1027 vlv_gpu_freq(dev_priv->mem_freq, val));
1029 seq_printf(m, "current GPU freq: %d MHz\n",
1030 vlv_gpu_freq(dev_priv->mem_freq,
1031 (freq_sts >> 8) & 0xff));
1032 mutex_unlock(&dev_priv->rps.hw_lock);
1034 seq_puts(m, "no P-state info available\n");
1040 static int i915_delayfreq_table(struct seq_file *m, void *unused)
1042 struct drm_info_node *node = (struct drm_info_node *) m->private;
1043 struct drm_device *dev = node->minor->dev;
1044 drm_i915_private_t *dev_priv = dev->dev_private;
1048 ret = mutex_lock_interruptible(&dev->struct_mutex);
1052 for (i = 0; i < 16; i++) {
1053 delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
1054 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
1055 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
1058 mutex_unlock(&dev->struct_mutex);
1063 static inline int MAP_TO_MV(int map)
1065 return 1250 - (map * 25);
1068 static int i915_inttoext_table(struct seq_file *m, void *unused)
1070 struct drm_info_node *node = (struct drm_info_node *) m->private;
1071 struct drm_device *dev = node->minor->dev;
1072 drm_i915_private_t *dev_priv = dev->dev_private;
1076 ret = mutex_lock_interruptible(&dev->struct_mutex);
1080 for (i = 1; i <= 32; i++) {
1081 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
1082 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
1085 mutex_unlock(&dev->struct_mutex);
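/* Render C-state (DRPC) reporting: ironlake_drpc_info() decodes
 * MEMMODECTL/RSTDBYCTL, gen6_drpc_info() decodes the RP/RC control registers
 * and the RC6 residency counters. */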
1090 static int ironlake_drpc_info(struct seq_file *m)
1092 struct drm_info_node *node = (struct drm_info_node *) m->private;
1093 struct drm_device *dev = node->minor->dev;
1094 drm_i915_private_t *dev_priv = dev->dev_private;
1095 u32 rgvmodectl, rstdbyctl;
1099 ret = mutex_lock_interruptible(&dev->struct_mutex);
1103 rgvmodectl = I915_READ(MEMMODECTL);
1104 rstdbyctl = I915_READ(RSTDBYCTL);
1105 crstandvid = I915_READ16(CRSTANDVID);
1107 mutex_unlock(&dev->struct_mutex);
1109 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
1111 seq_printf(m, "Boost freq: %d\n",
1112 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1113 MEMMODE_BOOST_FREQ_SHIFT);
1114 seq_printf(m, "HW control enabled: %s\n",
1115 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
1116 seq_printf(m, "SW control enabled: %s\n",
1117 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
1118 seq_printf(m, "Gated voltage change: %s\n",
1119 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
1120 seq_printf(m, "Starting frequency: P%d\n",
1121 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1122 seq_printf(m, "Max P-state: P%d\n",
1123 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1124 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1125 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1126 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1127 seq_printf(m, "Render standby enabled: %s\n",
1128 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
1129 seq_puts(m, "Current RS state: ");
1130 switch (rstdbyctl & RSX_STATUS_MASK) {
1132 seq_puts(m, "on\n");
1134 case RSX_STATUS_RC1:
1135 seq_puts(m, "RC1\n");
1137 case RSX_STATUS_RC1E:
1138 seq_puts(m, "RC1E\n");
1140 case RSX_STATUS_RS1:
1141 seq_puts(m, "RS1\n");
1143 case RSX_STATUS_RS2:
1144 seq_puts(m, "RS2 (RC6)\n");
1146 case RSX_STATUS_RS3:
1147 seq_puts(m, "RC3 (RC6+)\n");
1150 seq_puts(m, "unknown\n");
1157 static int gen6_drpc_info(struct seq_file *m)
1160 struct drm_info_node *node = (struct drm_info_node *) m->private;
1161 struct drm_device *dev = node->minor->dev;
1162 struct drm_i915_private *dev_priv = dev->dev_private;
1163 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
1164 unsigned forcewake_count;
1167 ret = mutex_lock_interruptible(&dev->struct_mutex);
1171 spin_lock_irq(&dev_priv->uncore.lock);
1172 forcewake_count = dev_priv->uncore.forcewake_count;
1173 spin_unlock_irq(&dev_priv->uncore.lock);
1175 if (forcewake_count) {
1176 seq_puts(m, "RC information inaccurate because somebody "
1177 "holds a forcewake reference \n");
1179 /* NB: we cannot use forcewake, else we read the wrong values */
1180 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
1182 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1185 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
1186 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1188 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1189 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1190 mutex_unlock(&dev->struct_mutex);
1191 mutex_lock(&dev_priv->rps.hw_lock);
1192 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
1193 mutex_unlock(&dev_priv->rps.hw_lock);
1195 seq_printf(m, "Video Turbo Mode: %s\n",
1196 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1197 seq_printf(m, "HW control enabled: %s\n",
1198 yesno(rpmodectl1 & GEN6_RP_ENABLE));
1199 seq_printf(m, "SW control enabled: %s\n",
1200 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1201 GEN6_RP_MEDIA_SW_MODE));
1202 seq_printf(m, "RC1e Enabled: %s\n",
1203 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1204 seq_printf(m, "RC6 Enabled: %s\n",
1205 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1206 seq_printf(m, "Deep RC6 Enabled: %s\n",
1207 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1208 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1209 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1210 seq_puts(m, "Current RC state: ");
1211 switch (gt_core_status & GEN6_RCn_MASK) {
1213 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1214 seq_puts(m, "Core Power Down\n");
1216 seq_puts(m, "on\n");
1219 seq_puts(m, "RC3\n");
1222 seq_puts(m, "RC6\n");
1225 seq_puts(m, "RC7\n");
1228 seq_puts(m, "Unknown\n");
1232 seq_printf(m, "Core Power Down: %s\n",
1233 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1235 /* Not exactly sure what this is */
1236 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
1237 I915_READ(GEN6_GT_GFX_RC6_LOCKED));
1238 seq_printf(m, "RC6 residency since boot: %u\n",
1239 I915_READ(GEN6_GT_GFX_RC6));
1240 seq_printf(m, "RC6+ residency since boot: %u\n",
1241 I915_READ(GEN6_GT_GFX_RC6p));
1242 seq_printf(m, "RC6++ residency since boot: %u\n",
1243 I915_READ(GEN6_GT_GFX_RC6pp));
1245 seq_printf(m, "RC6 voltage: %dmV\n",
1246 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1247 seq_printf(m, "RC6+ voltage: %dmV\n",
1248 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1249 seq_printf(m, "RC6++ voltage: %dmV\n",
1250 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1254 static int i915_drpc_info(struct seq_file *m, void *unused)
1256 struct drm_info_node *node = (struct drm_info_node *) m->private;
1257 struct drm_device *dev = node->minor->dev;
1259 if (IS_GEN6(dev) || IS_GEN7(dev))
1260 return gen6_drpc_info(m);
1262 return ironlake_drpc_info(m);
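/* Report whether framebuffer compression is enabled and, if not, the reason
 * recorded in dev_priv->fbc.no_fbc_reason. */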
1265 static int i915_fbc_status(struct seq_file *m, void *unused)
1267 struct drm_info_node *node = (struct drm_info_node *) m->private;
1268 struct drm_device *dev = node->minor->dev;
1269 drm_i915_private_t *dev_priv = dev->dev_private;
1271 if (!I915_HAS_FBC(dev)) {
1272 seq_puts(m, "FBC unsupported on this chipset\n");
1276 if (intel_fbc_enabled(dev)) {
1277 seq_puts(m, "FBC enabled\n");
1279 seq_puts(m, "FBC disabled: ");
1280 switch (dev_priv->fbc.no_fbc_reason) {
1282 seq_puts(m, "FBC actived, but currently disabled in hardware");
1284 case FBC_UNSUPPORTED:
1285 seq_puts(m, "unsupported by this chipset");
1288 seq_puts(m, "no outputs");
1290 case FBC_STOLEN_TOO_SMALL:
1291 seq_puts(m, "not enough stolen memory");
1293 case FBC_UNSUPPORTED_MODE:
1294 seq_puts(m, "mode not supported");
1296 case FBC_MODE_TOO_LARGE:
1297 seq_puts(m, "mode too large");
1300 seq_puts(m, "FBC unsupported on plane");
1303 seq_puts(m, "scanout buffer not tiled");
1305 case FBC_MULTIPLE_PIPES:
1306 seq_puts(m, "multiple pipes are enabled");
1308 case FBC_MODULE_PARAM:
1309 seq_puts(m, "disabled per module param (default off)");
1311 case FBC_CHIP_DEFAULT:
1312 seq_puts(m, "disabled per chip default");
1315 seq_puts(m, "unknown reason");
1322 static int i915_ips_status(struct seq_file *m, void *unused)
1324 struct drm_info_node *node = (struct drm_info_node *) m->private;
1325 struct drm_device *dev = node->minor->dev;
1326 struct drm_i915_private *dev_priv = dev->dev_private;
1328 if (!HAS_IPS(dev)) {
1329 seq_puts(m, "not supported\n");
1333 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1334 seq_puts(m, "enabled\n");
1336 seq_puts(m, "disabled\n");
1341 static int i915_sr_status(struct seq_file *m, void *unused)
1343 struct drm_info_node *node = (struct drm_info_node *) m->private;
1344 struct drm_device *dev = node->minor->dev;
1345 drm_i915_private_t *dev_priv = dev->dev_private;
1346 bool sr_enabled = false;
1348 if (HAS_PCH_SPLIT(dev))
1349 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1350 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
1351 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1352 else if (IS_I915GM(dev))
1353 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1354 else if (IS_PINEVIEW(dev))
1355 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1357 seq_printf(m, "self-refresh: %s\n",
1358 sr_enabled ? "enabled" : "disabled");
1363 static int i915_emon_status(struct seq_file *m, void *unused)
1365 struct drm_info_node *node = (struct drm_info_node *) m->private;
1366 struct drm_device *dev = node->minor->dev;
1367 drm_i915_private_t *dev_priv = dev->dev_private;
1368 unsigned long temp, chipset, gfx;
1374 ret = mutex_lock_interruptible(&dev->struct_mutex);
1378 temp = i915_mch_val(dev_priv);
1379 chipset = i915_chipset_val(dev_priv);
1380 gfx = i915_gfx_val(dev_priv);
1381 mutex_unlock(&dev->struct_mutex);
1383 seq_printf(m, "GMCH temp: %ld\n", temp);
1384 seq_printf(m, "Chipset power: %ld\n", chipset);
1385 seq_printf(m, "GFX power: %ld\n", gfx);
1386 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1391 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1393 struct drm_info_node *node = (struct drm_info_node *) m->private;
1394 struct drm_device *dev = node->minor->dev;
1395 drm_i915_private_t *dev_priv = dev->dev_private;
1397 int gpu_freq, ia_freq;
1399 if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
1400 seq_puts(m, "unsupported on this chipset\n");
1404 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1406 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1410 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1412 for (gpu_freq = dev_priv->rps.min_delay;
1413 gpu_freq <= dev_priv->rps.max_delay;
1416 sandybridge_pcode_read(dev_priv,
1417 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1419 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1420 gpu_freq * GT_FREQUENCY_MULTIPLIER,
1421 ((ia_freq >> 0) & 0xff) * 100,
1422 ((ia_freq >> 8) & 0xff) * 100);
1425 mutex_unlock(&dev_priv->rps.hw_lock);
1430 static int i915_gfxec(struct seq_file *m, void *unused)
1432 struct drm_info_node *node = (struct drm_info_node *) m->private;
1433 struct drm_device *dev = node->minor->dev;
1434 drm_i915_private_t *dev_priv = dev->dev_private;
1437 ret = mutex_lock_interruptible(&dev->struct_mutex);
1441 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
1443 mutex_unlock(&dev->struct_mutex);
1448 static int i915_opregion(struct seq_file *m, void *unused)
1450 struct drm_info_node *node = (struct drm_info_node *) m->private;
1451 struct drm_device *dev = node->minor->dev;
1452 drm_i915_private_t *dev_priv = dev->dev_private;
1453 struct intel_opregion *opregion = &dev_priv->opregion;
1454 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
1460 ret = mutex_lock_interruptible(&dev->struct_mutex);
1464 if (opregion->header) {
1465 memcpy_fromio(data, opregion->header, OPREGION_SIZE);
1466 seq_write(m, data, OPREGION_SIZE);
1469 mutex_unlock(&dev->struct_mutex);
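/* Describe the fbdev framebuffer (when CONFIG_DRM_I915_FBDEV is enabled) and
 * every user-created framebuffer, including each backing GEM object. */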
1476 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1478 struct drm_info_node *node = (struct drm_info_node *) m->private;
1479 struct drm_device *dev = node->minor->dev;
1480 struct intel_fbdev *ifbdev = NULL;
1481 struct intel_framebuffer *fb;
1483 #ifdef CONFIG_DRM_I915_FBDEV
1484 struct drm_i915_private *dev_priv = dev->dev_private;
1485 int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1489 ifbdev = dev_priv->fbdev;
1490 fb = to_intel_framebuffer(ifbdev->helper.fb);
1492 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1496 fb->base.bits_per_pixel,
1497 atomic_read(&fb->base.refcount.refcount));
1498 describe_obj(m, fb->obj);
1500 mutex_unlock(&dev->mode_config.mutex);
1503 mutex_lock(&dev->mode_config.fb_lock);
1504 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
1505 if (ifbdev && &fb->base == ifbdev->helper.fb)
1508 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1512 fb->base.bits_per_pixel,
1513 atomic_read(&fb->base.refcount.refcount));
1514 describe_obj(m, fb->obj);
1517 mutex_unlock(&dev->mode_config.fb_lock);
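/* Dump the Ironlake power/render context objects and every hardware context,
 * noting which rings use a context as their default one. */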
1522 static int i915_context_status(struct seq_file *m, void *unused)
1524 struct drm_info_node *node = (struct drm_info_node *) m->private;
1525 struct drm_device *dev = node->minor->dev;
1526 drm_i915_private_t *dev_priv = dev->dev_private;
1527 struct intel_ring_buffer *ring;
1528 struct i915_hw_context *ctx;
1531 ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1535 if (dev_priv->ips.pwrctx) {
1536 seq_puts(m, "power context ");
1537 describe_obj(m, dev_priv->ips.pwrctx);
1541 if (dev_priv->ips.renderctx) {
1542 seq_puts(m, "render context ");
1543 describe_obj(m, dev_priv->ips.renderctx);
1547 list_for_each_entry(ctx, &dev_priv->context_list, link) {
1548 seq_puts(m, "HW context ");
1549 describe_ctx(m, ctx);
1550 for_each_ring(ring, dev_priv, i)
1551 if (ring->default_context == ctx)
1552 seq_printf(m, "(default context %s) ", ring->name);
1554 describe_obj(m, ctx->obj);
1558 mutex_unlock(&dev->mode_config.mutex);
1563 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
1565 struct drm_info_node *node = (struct drm_info_node *) m->private;
1566 struct drm_device *dev = node->minor->dev;
1567 struct drm_i915_private *dev_priv = dev->dev_private;
1568 unsigned forcewake_count;
1570 spin_lock_irq(&dev_priv->uncore.lock);
1571 forcewake_count = dev_priv->uncore.forcewake_count;
1572 spin_unlock_irq(&dev_priv->uncore.lock);
1574 seq_printf(m, "forcewake count = %u\n", forcewake_count);
1579 static const char *swizzle_string(unsigned swizzle)
1582 case I915_BIT_6_SWIZZLE_NONE:
1584 case I915_BIT_6_SWIZZLE_9:
1586 case I915_BIT_6_SWIZZLE_9_10:
1587 return "bit9/bit10";
1588 case I915_BIT_6_SWIZZLE_9_11:
1589 return "bit9/bit11";
1590 case I915_BIT_6_SWIZZLE_9_10_11:
1591 return "bit9/bit10/bit11";
1592 case I915_BIT_6_SWIZZLE_9_17:
1593 return "bit9/bit17";
1594 case I915_BIT_6_SWIZZLE_9_10_17:
1595 return "bit9/bit10/bit17";
1596 case I915_BIT_6_SWIZZLE_UNKNOWN:
1603 static int i915_swizzle_info(struct seq_file *m, void *data)
1605 struct drm_info_node *node = (struct drm_info_node *) m->private;
1606 struct drm_device *dev = node->minor->dev;
1607 struct drm_i915_private *dev_priv = dev->dev_private;
1610 ret = mutex_lock_interruptible(&dev->struct_mutex);
1614 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1615 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1616 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1617 swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1619 if (IS_GEN3(dev) || IS_GEN4(dev)) {
1620 seq_printf(m, "DDC = 0x%08x\n",
1622 seq_printf(m, "C0DRB3 = 0x%04x\n",
1623 I915_READ16(C0DRB3));
1624 seq_printf(m, "C1DRB3 = 0x%04x\n",
1625 I915_READ16(C1DRB3));
1626 } else if (INTEL_INFO(dev)->gen >= 6) {
1627 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1628 I915_READ(MAD_DIMM_C0));
1629 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1630 I915_READ(MAD_DIMM_C1));
1631 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1632 I915_READ(MAD_DIMM_C2));
1633 seq_printf(m, "TILECTL = 0x%08x\n",
1634 I915_READ(TILECTL));
1636 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1637 I915_READ(GAMTARBMODE));
1639 seq_printf(m, "ARB_MODE = 0x%08x\n",
1640 I915_READ(ARB_MODE));
1641 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1642 I915_READ(DISP_ARB_CTL));
1644 mutex_unlock(&dev->struct_mutex);
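/* PPGTT state: gen8_ppgtt_info() dumps the per-ring PDP registers of the
 * aliasing PPGTT, gen6_ppgtt_info() dumps GFX_MODE and the PP_DIR registers
 * plus the aliasing PPGTT's page-directory offset. */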
1649 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
1651 struct drm_i915_private *dev_priv = dev->dev_private;
1652 struct intel_ring_buffer *ring;
1653 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
1659 seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
1660 seq_printf(m, "Page tables: %d\n", ppgtt->num_pt_pages);
1661 for_each_ring(ring, dev_priv, unused) {
1662 seq_printf(m, "%s\n", ring->name);
1663 for (i = 0; i < 4; i++) {
1664 u32 offset = 0x270 + i * 8;
1665 u64 pdp = I915_READ(ring->mmio_base + offset + 4);
1666 pdp <<= 32;
1667 pdp |= I915_READ(ring->mmio_base + offset);
1669 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
1674 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
1676 struct drm_i915_private *dev_priv = dev->dev_private;
1677 struct intel_ring_buffer *ring;
1680 if (INTEL_INFO(dev)->gen == 6)
1681 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
1683 for_each_ring(ring, dev_priv, i) {
1684 seq_printf(m, "%s\n", ring->name);
1685 if (INTEL_INFO(dev)->gen == 7)
1686 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
1687 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
1688 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
1689 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
1691 if (dev_priv->mm.aliasing_ppgtt) {
1692 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
1694 seq_puts(m, "aliasing PPGTT:\n");
1695 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
1697 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
1700 static int i915_ppgtt_info(struct seq_file *m, void *data)
1702 struct drm_info_node *node = (struct drm_info_node *) m->private;
1703 struct drm_device *dev = node->minor->dev;
1705 int ret = mutex_lock_interruptible(&dev->struct_mutex);
1709 if (INTEL_INFO(dev)->gen >= 8)
1710 gen8_ppgtt_info(m, dev);
1711 else if (INTEL_INFO(dev)->gen >= 6)
1712 gen6_ppgtt_info(m, dev);
1714 mutex_unlock(&dev->struct_mutex);
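/* Valleyview only: dump a selection of DPIO PLL divider/reference/LPF
 * registers through sideband reads. */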
1719 static int i915_dpio_info(struct seq_file *m, void *data)
1721 struct drm_info_node *node = (struct drm_info_node *) m->private;
1722 struct drm_device *dev = node->minor->dev;
1723 struct drm_i915_private *dev_priv = dev->dev_private;
1727 if (!IS_VALLEYVIEW(dev)) {
1728 seq_puts(m, "unsupported\n");
1732 ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
1736 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
1738 seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
1739 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
1740 seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
1741 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));
1743 seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
1744 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
1745 seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
1746 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));
1748 seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
1749 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
1750 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
1751 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));
1753 seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
1754 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
1755 seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
1756 vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));
1758 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
1759 vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));
1761 mutex_unlock(&dev_priv->dpio_lock);
1766 static int i915_llc(struct seq_file *m, void *data)
1768 struct drm_info_node *node = (struct drm_info_node *) m->private;
1769 struct drm_device *dev = node->minor->dev;
1770 struct drm_i915_private *dev_priv = dev->dev_private;
1772 /* Size calculation for LLC is a bit of a pain. Ignore for now. */
1773 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
1774 seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
1779 static int i915_edp_psr_status(struct seq_file *m, void *data)
1781 struct drm_info_node *node = m->private;
1782 struct drm_device *dev = node->minor->dev;
1783 struct drm_i915_private *dev_priv = dev->dev_private;
1785 bool enabled = false;
1787 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
1788 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
1790 enabled = HAS_PSR(dev) &&
1791 I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1792 seq_printf(m, "Enabled: %s\n", yesno(enabled));
1795 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
1796 EDP_PSR_PERF_CNT_MASK;
1797 seq_printf(m, "Performance_Counter: %u\n", psrperf);
1802 static int i915_energy_uJ(struct seq_file *m, void *data)
1804 struct drm_info_node *node = m->private;
1805 struct drm_device *dev = node->minor->dev;
1806 struct drm_i915_private *dev_priv = dev->dev_private;
1810 if (INTEL_INFO(dev)->gen < 6)
1813 rdmsrl(MSR_RAPL_POWER_UNIT, power);
1814 power = (power & 0x1f00) >> 8;
1815 units = 1000000 / (1 << power); /* convert to uJ */
1816 power = I915_READ(MCH_SECP_NRG_STTS);
1817 power *= units;
1819 seq_printf(m, "%llu", (long long unsigned)power);
1824 static int i915_pc8_status(struct seq_file *m, void *unused)
1826 struct drm_info_node *node = (struct drm_info_node *) m->private;
1827 struct drm_device *dev = node->minor->dev;
1828 struct drm_i915_private *dev_priv = dev->dev_private;
1830 if (!IS_HASWELL(dev)) {
1831 seq_puts(m, "not supported\n");
1835 mutex_lock(&dev_priv->pc8.lock);
1836 seq_printf(m, "Requirements met: %s\n",
1837 yesno(dev_priv->pc8.requirements_met));
1838 seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
1839 seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
1840 seq_printf(m, "IRQs disabled: %s\n",
1841 yesno(dev_priv->pc8.irqs_disabled));
1842 seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled));
1843 mutex_unlock(&dev_priv->pc8.lock);
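/*
 * Pipe CRC support: each pipe gets an i915_pipe_<pipe>_crc debugfs file that
 * streams captured CRC entries out of a circular buffer, and the
 * display_crc_ctl interface below selects the CRC source per pipe.
 */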
1848 struct pipe_crc_info {
1850 struct drm_device *dev;
1854 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
1856 struct pipe_crc_info *info = inode->i_private;
1857 struct drm_i915_private *dev_priv = info->dev->dev_private;
1858 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
1860 spin_lock_irq(&pipe_crc->lock);
1862 if (pipe_crc->opened) {
1863 spin_unlock_irq(&pipe_crc->lock);
1864 return -EBUSY; /* already open */
1867 pipe_crc->opened = true;
1868 filep->private_data = inode->i_private;
1870 spin_unlock_irq(&pipe_crc->lock);
1875 static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
1877 struct pipe_crc_info *info = inode->i_private;
1878 struct drm_i915_private *dev_priv = info->dev->dev_private;
1879 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
1881 spin_lock_irq(&pipe_crc->lock);
1882 pipe_crc->opened = false;
1883 spin_unlock_irq(&pipe_crc->lock);
1888 /* (6 fields, 8 chars each, space separated (5) + '\n') */
1889 #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
1890 /* account for the terminating '\0' */
1891 #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
1893 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
1895 assert_spin_locked(&pipe_crc->lock);
1896 return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
1897 INTEL_PIPE_CRC_ENTRIES_NR);
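/* read() for the per-pipe CRC file: blocks (unless O_NONBLOCK) until entries
 * are available, then copies out whole "frame crc0..crc4" formatted lines
 * from the circular buffer and advances the tail. */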
1901 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
1904 struct pipe_crc_info *info = filep->private_data;
1905 struct drm_device *dev = info->dev;
1906 struct drm_i915_private *dev_priv = dev->dev_private;
1907 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
1908 char buf[PIPE_CRC_BUFFER_LEN];
1909 int head, tail, n_entries, n;
1913 * Don't allow user space to provide buffers not big enough to hold
1914 * a line of data.
1916 if (count < PIPE_CRC_LINE_LEN)
1919 if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
1922 /* nothing to read */
1923 spin_lock_irq(&pipe_crc->lock);
1924 while (pipe_crc_data_count(pipe_crc) == 0) {
1927 if (filep->f_flags & O_NONBLOCK) {
1928 spin_unlock_irq(&pipe_crc->lock);
1932 ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
1933 pipe_crc_data_count(pipe_crc), pipe_crc->lock);
1935 spin_unlock_irq(&pipe_crc->lock);
1940 /* We now have one or more entries to read */
1941 head = pipe_crc->head;
1942 tail = pipe_crc->tail;
1943 n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
1944 count / PIPE_CRC_LINE_LEN);
1945 spin_unlock_irq(&pipe_crc->lock);
1950 struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
1953 bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
1954 "%8u %8x %8x %8x %8x %8x\n",
1955 entry->frame, entry->crc[0],
1956 entry->crc[1], entry->crc[2],
1957 entry->crc[3], entry->crc[4]);
1959 ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
1960 buf, PIPE_CRC_LINE_LEN);
1961 if (ret == PIPE_CRC_LINE_LEN)
1964 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
1965 tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1967 } while (--n_entries);
1969 spin_lock_irq(&pipe_crc->lock);
1970 pipe_crc->tail = tail;
1971 spin_unlock_irq(&pipe_crc->lock);
1976 static const struct file_operations i915_pipe_crc_fops = {
1977 .owner = THIS_MODULE,
1978 .open = i915_pipe_crc_open,
1979 .read = i915_pipe_crc_read,
1980 .release = i915_pipe_crc_release,
1983 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
1985 .name = "i915_pipe_A_crc",
1989 .name = "i915_pipe_B_crc",
1993 .name = "i915_pipe_C_crc",
1998 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
2001 struct drm_device *dev = minor->dev;
2003 struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
2006 ent = debugfs_create_file(info->name, S_IRUGO, root, info,
2007 &i915_pipe_crc_fops);
2009 return PTR_ERR(ent);
2011 return drm_add_fake_info_node(minor, ent, info);
2014 static const char * const pipe_crc_sources[] = {
2027 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
2029 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
2030 return pipe_crc_sources[source];
2033 static int display_crc_ctl_show(struct seq_file *m, void *data)
2035 struct drm_device *dev = m->private;
2036 struct drm_i915_private *dev_priv = dev->dev_private;
2039 for (i = 0; i < I915_MAX_PIPES; i++)
2040 seq_printf(m, "%c %s\n", pipe_name(i),
2041 pipe_crc_source_name(dev_priv->pipe_crc[i].source));
2046 static int display_crc_ctl_open(struct inode *inode, struct file *file)
2048 struct drm_device *dev = inode->i_private;
2050 return single_open(file, display_crc_ctl_show, dev);
2053 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
2056 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2057 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2060 case INTEL_PIPE_CRC_SOURCE_PIPE:
2061 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
2063 case INTEL_PIPE_CRC_SOURCE_NONE:
2073 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
2074 enum intel_pipe_crc_source *source)
2076 struct intel_encoder *encoder;
2077 struct intel_crtc *crtc;
2078 struct intel_digital_port *dig_port;
2081 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2083 mutex_lock(&dev->mode_config.mutex);
2084 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2086 if (!encoder->base.crtc)
2089 crtc = to_intel_crtc(encoder->base.crtc);
2091 if (crtc->pipe != pipe)
2094 switch (encoder->type) {
2095 case INTEL_OUTPUT_TVOUT:
2096 *source = INTEL_PIPE_CRC_SOURCE_TV;
2098 case INTEL_OUTPUT_DISPLAYPORT:
2099 case INTEL_OUTPUT_EDP:
2100 dig_port = enc_to_dig_port(&encoder->base);
2101 switch (dig_port->port) {
2103 *source = INTEL_PIPE_CRC_SOURCE_DP_B;
2106 *source = INTEL_PIPE_CRC_SOURCE_DP_C;
2109 *source = INTEL_PIPE_CRC_SOURCE_DP_D;
2112 WARN(1, "nonexisting DP port %c\n",
2113 port_name(dig_port->port));
2119 mutex_unlock(&dev->mode_config.mutex);
2124 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
2126 enum intel_pipe_crc_source *source,
2129 struct drm_i915_private *dev_priv = dev->dev_private;
2130 bool need_stable_symbols = false;
2132 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
2133 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
2139 case INTEL_PIPE_CRC_SOURCE_PIPE:
2140 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
2142 case INTEL_PIPE_CRC_SOURCE_DP_B:
2143 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
2144 need_stable_symbols = true;
2146 case INTEL_PIPE_CRC_SOURCE_DP_C:
2147 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
2148 need_stable_symbols = true;
2150 case INTEL_PIPE_CRC_SOURCE_NONE:
2158 * When the pipe CRC tap point is after the transcoders we need
2159 * to tweak symbol-level features to produce a deterministic series of
2160 * symbols for a given frame. We need to reset those features only once
2161 * a frame (instead of every nth symbol):
2162 * - DC-balance: used to ensure a better clock recovery from the data
2163 *   output
2164 * - DisplayPort scrambling: used for EMI reduction
2166 if (need_stable_symbols) {
2167 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2169 WARN_ON(!IS_G4X(dev));
2171 tmp |= DC_BALANCE_RESET_VLV;
2173 tmp |= PIPE_A_SCRAMBLE_RESET;
2175 tmp |= PIPE_B_SCRAMBLE_RESET;
2177 I915_WRITE(PORT_DFT2_G4X, tmp);
2183 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
2185 enum intel_pipe_crc_source *source,
2188 struct drm_i915_private *dev_priv = dev->dev_private;
2189 bool need_stable_symbols = false;
2191 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
2192 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
2198 case INTEL_PIPE_CRC_SOURCE_PIPE:
2199 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
2201 case INTEL_PIPE_CRC_SOURCE_TV:
2202 if (!SUPPORTS_TV(dev))
2204 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
2206 case INTEL_PIPE_CRC_SOURCE_DP_B:
2209 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
2210 need_stable_symbols = true;
2212 case INTEL_PIPE_CRC_SOURCE_DP_C:
2215 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
2216 need_stable_symbols = true;
2218 case INTEL_PIPE_CRC_SOURCE_DP_D:
2221 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
2222 need_stable_symbols = true;
2224 case INTEL_PIPE_CRC_SOURCE_NONE:
2232 * When the pipe CRC tap point is after the transcoders we need
2233 * to tweak symbol-level features to produce a deterministic series of
2234 * symbols for a given frame. We need to reset those features only once
2235 * a frame (instead of every nth symbol):
2236 * - DC-balance: used to ensure a better clock recovery from the data
2237 *   output
2238 * - DisplayPort scrambling: used for EMI reduction
2240 if (need_stable_symbols) {
2241 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2243 WARN_ON(!IS_G4X(dev));
2245 I915_WRITE(PORT_DFT_I9XX,
2246 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
2249 tmp |= PIPE_A_SCRAMBLE_RESET;
2251 tmp |= PIPE_B_SCRAMBLE_RESET;
2253 I915_WRITE(PORT_DFT2_G4X, tmp);
static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
		tmp &= ~DC_BALANCE_RESET_VLV;
	I915_WRITE(PORT_DFT2_G4X, tmp);
}
static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	I915_WRITE(PORT_DFT2_G4X, tmp);

	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
	}
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PF;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PF:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
			       enum intel_pipe_crc_source source)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	u32 val = 0; /* shut up gcc */
	int ret;

	if (pipe_crc->source == source)
		return 0;

	/* forbid changing the source without going back to 'none' */
	if (pipe_crc->source && source)
		return -EINVAL;

	if (IS_GEN2(dev))
		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
	else if (INTEL_INFO(dev)->gen < 5)
		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_VALLEYVIEW(dev))
		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ret = ilk_pipe_crc_ctl_reg(&source, &val);
	else
		ret = ivb_pipe_crc_ctl_reg(&source, &val);

	if (ret != 0)
		return ret;

	/* none -> real source transition */
	if (source) {
		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
				 pipe_name(pipe), pipe_crc_source_name(source));

		pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
					    INTEL_PIPE_CRC_ENTRIES_NR,
					    GFP_KERNEL);
		if (!pipe_crc->entries)
			return -ENOMEM;

		spin_lock_irq(&pipe_crc->lock);
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);
	}

	pipe_crc->source = source;

	I915_WRITE(PIPE_CRC_CTL(pipe), val);
	POSTING_READ(PIPE_CRC_CTL(pipe));

	/* real source -> none transition */
	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
		struct intel_pipe_crc_entry *entries;

		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
				 pipe_name(pipe));

		intel_wait_for_vblank(dev, pipe);

		spin_lock_irq(&pipe_crc->lock);
		entries = pipe_crc->entries;
		pipe_crc->entries = NULL;
		spin_unlock_irq(&pipe_crc->lock);

		kfree(entries);

		if (IS_G4X(dev))
			g4x_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_undo_pipe_scramble_reset(dev, pipe);
	}

	return 0;
}
/*
 * Parse pipe CRC command strings:
 *   command: wsp* object wsp+ name wsp+ source wsp*
 *   object: 'pipe'
 *   name: (A | B | C)
 *   source: (none | plane1 | plane2 | pf)
 *   wsp: (#0x20 | #0x9 | #0xA)+
 *
 * eg.:
 *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
 *  "pipe A none"    ->  Stop CRC
 */
static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
{
	int n_words = 0;

	while (*buf) {
		char *end;

		/* skip leading white space */
		buf = skip_spaces(buf);
		if (!*buf)
			break;	/* end of buffer */

		/* find end of word */
		for (end = buf; *end && !isspace(*end); end++)
			;

		if (n_words == max_words) {
			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
					 max_words);
			return -EINVAL;	/* ran out of words[] before bytes */
		}

		if (*end)
			*end++ = '\0';
		words[n_words++] = buf;
		buf = end;
	}

	return n_words;
}
enum intel_pipe_crc_object {
	PIPE_CRC_OBJECT_PIPE,
};

static const char * const pipe_crc_objects[] = {
	"pipe",
};

static int
display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
		if (!strcmp(buf, pipe_crc_objects[i])) {
			*o = i;
			return 0;
		}

	return -EINVAL;
}
static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
{
	const char name = buf[0];

	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
		return -EINVAL;

	*pipe = name - 'A';

	return 0;
}
static int
display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
		if (!strcmp(buf, pipe_crc_sources[i])) {
			*s = i;
			return 0;
		}

	return -EINVAL;
}
static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
{
#define N_WORDS 3
	int n_words;
	char *words[N_WORDS];
	enum pipe pipe;
	enum intel_pipe_crc_object object;
	enum intel_pipe_crc_source source;

	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
	if (n_words != N_WORDS) {
		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
				 N_WORDS);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
		return -EINVAL;
	}

	return pipe_crc_set_source(dev, pipe, source);
}
static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	char *tmpbuf;
	int ret;

	if (len == 0)
		return 0;

	if (len > PAGE_SIZE - 1) {
		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
				 PAGE_SIZE);
		return -E2BIG;
	}

	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
	if (!tmpbuf)
		return -ENOMEM;

	if (copy_from_user(tmpbuf, ubuf, len)) {
		ret = -EFAULT;
		goto out;
	}
	tmpbuf[len] = '\0';

	ret = display_crc_ctl_parse(dev, tmpbuf, len);

out:
	kfree(tmpbuf);
	if (ret < 0)
		return ret;

	*offp += len;
	return len;
}
static const struct file_operations i915_display_crc_ctl_fops = {
	.owner = THIS_MODULE,
	.open = display_crc_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = display_crc_ctl_write
};
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = atomic_read(&dev_priv->gpu_error.reset_counter);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;

	DRM_INFO("Manually setting wedged to %llu\n", val);
	i915_handle_error(dev, val);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
static int
i915_ring_stop_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.stop_rings;

	return 0;
}

static int
i915_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");
static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.missed_irq_rings;

	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	dev_priv->gpu_error.missed_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");
static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.test_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)
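/*
 * Usage sketch (an illustration, not from the original source): with the
 * flags above DROP_ALL is 0xf, so flushing everything from userspace on
 * card 0 would be
 *
 *	# echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 *
 * Reading the file reports DROP_ALL, i.e. the full set of accepted bits.
 */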
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;
	struct i915_address_space *vm;
	struct i915_vma *vma, *x;
	int ret;

	DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets; only libdrm auto-restarts
	 * ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gpu_idle(dev);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev);

	if (val & DROP_BOUND) {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			list_for_each_entry_safe(vma, x, &vm->inactive_list,
						 mm_list) {
				if (vma->obj->pin_count)
					continue;

				ret = i915_vma_unbind(vma);
				if (ret)
					goto unlock;
			}
		}
	}

	if (val & DROP_UNBOUND) {
		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
					 global_list)
			if (obj->pages_pin_count == 0) {
				ret = i915_gem_object_put_pages(obj);
				if (ret)
					goto unlock;
			}
	}

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv->mem_freq,
				    dev_priv->rps.max_delay);
	else
		*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}
static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);
		dev_priv->rps.max_delay = val;
		valleyview_set_rps(dev, val);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);
		dev_priv->rps.max_delay = val;
		gen6_set_rps(dev, val);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");
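/*
 * Note on units (a reading of the code above, not authoritative): on Gen6/7
 * the value exchanged through i915_max_freq/i915_min_freq is in MHz and is
 * converted to and from hardware units with GT_FREQUENCY_MULTIPLIER, while
 * on Valleyview it is translated with vlv_gpu_freq()/vlv_freq_opcode().
 * An illustrative (assumed) invocation:
 *
 *	# echo 1100 > /sys/kernel/debug/dri/0/i915_max_freq
 *
 * which would cap turbo at roughly 1100 MHz on Gen6/7.
 */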
static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev))
		*val = vlv_gpu_freq(dev_priv->mem_freq,
				    dev_priv->rps.min_delay);
	else
		*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}
static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	if (IS_VALLEYVIEW(dev)) {
		val = vlv_freq_opcode(dev_priv->mem_freq, val);
		dev_priv->rps.min_delay = val;
		valleyview_set_rps(dev, val);
	} else {
		do_div(val, GT_FREQUENCY_MULTIPLIER);
		dev_priv->rps.min_delay = val;
		gen6_set_rps(dev, val);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	mutex_unlock(&dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
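/*
 * Usage sketch (an assumption, mirroring the val > 3 check above): the value
 * written is the raw 2-bit snoop-control field of GEN6_MBCUNIT_SNPCR, so only
 * 0-3 are accepted, e.g.
 *
 *	# echo 1 > /sys/kernel/debug/dri/0/i915_cache_sharing
 */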
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_get(dev_priv);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_put(dev_priv);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
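/*
 * i915_forcewake_user is reference-style: open() grabs forcewake and
 * release() drops it, so the GT power well is kept awake (and register
 * access stays valid) for as long as the file is held open. An illustrative
 * (assumed) shell usage while poking GT registers:
 *
 *	# exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user
 *	... read/write GT registers ...
 *	# exec 3<&-
 */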
static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, fops);
}
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_dpio", i915_dpio_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_pc8_status", i915_pc8_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
};
void intel_display_crc_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i];

		pipe_crc->opened = false;
		spin_lock_init(&pipe_crc->lock);
		init_waitqueue_head(&pipe_crc->wq);
	}
}
int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
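/*
 * Every file registered above through drm_add_fake_info_node() is keyed on
 * its fops pointer, which is why the cleanup below can hand that same
 * pointer back to drm_debugfs_remove_files() as a one-entry drm_info_list.
 */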
void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)&i915_pipe_crc_data[i];

		drm_debugfs_remove_files(info_list, 1, minor);
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}

#endif /* CONFIG_DEBUG_FS */