/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};
static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}
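/*
 * Legend for the single-character object flags emitted by the helpers
 * below and consumed by describe_obj(): 'P'/'p' for a user/driver pin,
 * 'X'/'Y' for the tiling mode, and 'g' for a global GTT mapping.
 */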
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (i915_gem_obj_is_pinned(obj))
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->has_global_gtt_mapping ? "g" : " ";
}
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   i915_cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->pin_count > 0)
			pin_count++;
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08lx, size: %08lx)",
			   vma->node.start, vma->node.size);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
	if (obj->frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
	seq_putc(m, ' ');
}
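/*
 * The active- and inactive-list debugfs files share this handler; which
 * list to walk is encoded in the node's info_ent->data (the enum above).
 */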
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_printf(m, "   ");
		describe_obj(m, vma->obj);
		seq_printf(m, "\n");
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	return a->stolen->start - b->stolen->start;
}
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
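/*
 * Note: count_objects() and count_vmas() are macros rather than functions
 * so that they can accumulate directly into the caller's local size,
 * count, mappable_size and mappable_count variables.
 */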
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)
struct file_stats {
	struct drm_i915_file_private *file_priv;
	int count;
	size_t total, unbound;
	size_t global, shared;
	size_t active, inactive;
};
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;

	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	if (USES_FULL_PPGTT(obj->base.dev)) {
		list_for_each_entry(vma, &obj->vma_list, vma_link) {
			struct i915_hw_ppgtt *ppgtt;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_is_ggtt(vma->vm)) {
				stats->global += obj->base.size;
				continue;
			}

			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
			if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv)
				continue;

			if (obj->ring) /* XXX per-vma statistic */
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;

			return 0;
		}
	} else {
		if (i915_gem_obj_ggtt_bound(obj)) {
			stats->global += obj->base.size;
			if (obj->ring)
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;
			return 0;
		}
	}

	if (!list_empty(&obj->global_list))
		stats->unbound += obj->base.size;

	return 0;
}
#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)
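/*
 * Top-level summary of GEM memory usage: global object/byte totals, the
 * bound/active/inactive breakdown, and then per-client totals gathered
 * with per_file_stats() above.
 */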
static int i915_gem_object_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.base.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	seq_putc(m, '\n');
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
			   task ? task->comm : "<unknown>",
			   stats.count,
			   stats.total,
			   stats.active,
			   stats.inactive,
			   stats.global,
			   stats.shared,
			   stats.unbound);
		rcu_read_unlock();
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_intel_crtc(dev, crtc) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;

				seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
					   i915_gem_obj_ggtt_offset(obj));
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;

				seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
					   i915_gem_obj_ggtt_offset(obj));
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, "    %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_puts(m, "No requests\n");

	return 0;
}
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
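/*
 * Interrupt register dump. The register layout differs per platform
 * (Cherryview, gen8+, Valleyview, pre-PCH-split, Ironlake and later),
 * so each branch below prints the set that exists on that hardware.
 */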
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_INFO(dev)->gen >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(pipe) {
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}
	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}
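/*
 * i915_error_state: reading the file dumps the captured GPU error state,
 * while writing anything to it clears the captured state (see the write
 * handler calling i915_destroy_error_state()). The fops below wire the
 * handlers together.
 */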
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};
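/*
 * i915_next_seqno is built with DEFINE_SIMPLE_ATTRIBUTE: the get/set
 * pair below is exposed as a single debugfs file that reports the next
 * seqno and lets it be overridden via i915_gem_set_seqno().
 */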
static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
		   IS_BROADWELL(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		reqf &= ~GEN6_TURBO_DISABLE;
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			reqf >>= 24;
		else
			reqf >>= 25;
		reqf *= GT_FREQUENCY_MULTIPLIER;

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf *= GT_FREQUENCY_MULTIPLIER;

		gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   I915_READ(GEN6_PMIER),
			   I915_READ(GEN6_PMIMR),
			   I915_READ(GEN6_PMISR),
			   I915_READ(GEN6_PMIIR),
			   I915_READ(GEN6_PMINTRMSK));
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "max GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
			   vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_puts(m, "no P-state info available\n");
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, rcctl1;
	unsigned fw_rendercount = 0, fw_mediacount = 0;

	intel_runtime_pm_get(dev_priv);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
				   GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (I915_READ(VLV_GTLC_PW_STATUS) &
		    VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (I915_READ(VLV_GTLC_PW_STATUS) &
		    VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	seq_printf(m, "Render RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_RENDER_RC6));
	seq_printf(m, "Media RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_MEDIA_RC6));

	spin_lock_irq(&dev_priv->uncore.lock);
	fw_rendercount = dev_priv->uncore.fw_rendercount;
	fw_mediacount = dev_priv->uncore.fw_mediacount;
	spin_unlock_irq(&dev_priv->uncore.lock);

	seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
	seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);

	return 0;
}
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6 voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));

	return 0;
}
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_VALLEYVIEW(dev))
		return vlv_drpc_info(m);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}
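/*
 * Reports whether framebuffer compression is currently enabled and, if
 * it is not, decodes dev_priv->fbc.no_fbc_reason into a human-readable
 * explanation.
 */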
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	if (intel_fbc_enabled(dev)) {
		seq_puts(m, "FBC enabled\n");
	} else {
		seq_puts(m, "FBC disabled: ");
		switch (dev_priv->fbc.no_fbc_reason) {
		case FBC_OK:
			seq_puts(m, "FBC activated, but currently disabled in hardware");
			break;
		case FBC_UNSUPPORTED:
			seq_puts(m, "unsupported by this chipset");
			break;
		case FBC_NO_OUTPUT:
			seq_puts(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_puts(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_puts(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_puts(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_puts(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_puts(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_puts(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_puts(m, "disabled per module param (default off)");
			break;
		case FBC_CHIP_DEFAULT:
			seq_puts(m, "disabled per chip default");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915.enable_ips));

	if (INTEL_INFO(dev)->gen >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		goto out;

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_freq_softlimit;
	     gpu_freq <= dev_priv->rps.max_freq_softlimit;
	     gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   gpu_freq * GT_FREQUENCY_MULTIPLIER,
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	return 0;
}
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_fbdev *ifbdev = NULL;
	struct intel_framebuffer *fb;

#ifdef CONFIG_DRM_I915_FBDEV
	struct drm_i915_private *dev_priv = dev->dev_private;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_putc(m, '\n');
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (ifbdev && &fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct intel_context *ctx;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (dev_priv->ips.pwrctx) {
		seq_puts(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_putc(m, '\n');
	}

	if (dev_priv->ips.renderctx) {
		seq_puts(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_putc(m, '\n');
	}

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		if (ctx->legacy_hw_ctx.rcs_state == NULL)
			continue;

		seq_puts(m, "HW context ");
		describe_ctx(m, ctx);
		for_each_ring(ring, dev_priv, i)
			if (ring->default_context == ctx)
				seq_printf(m, "(default context %s) ", ring->name);

		describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;

	spin_lock_irq(&dev_priv->uncore.lock);
	if (IS_VALLEYVIEW(dev)) {
		fw_rendercount = dev_priv->uncore.fw_rendercount;
		fw_mediacount = dev_priv->uncore.fw_mediacount;
	} else
		forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
		seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
	} else
		seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (IS_GEN8(dev))
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int per_file_ctx(int id, void *ptr, void *data)
{
	struct intel_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);

	if (i915_gem_context_is_default(ctx))
		seq_puts(m, "  default context:\n");
	else
		seq_printf(m, "  context %d:\n", ctx->user_handle);
	ppgtt->debug_dump(ppgtt, m);

	return 0;
}
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int unused, i;

	if (!ppgtt)
		return;

	seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
	seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
	for_each_ring(ring, dev_priv, unused) {
		seq_printf(m, "%s\n", ring->name);
		for (i = 0; i < 4; i++) {
			u32 offset = 0x270 + i * 8;
			u64 pdp = I915_READ(ring->mmio_base + offset + 4);
			pdp <<= 32;
			pdp |= I915_READ(ring->mmio_base + offset);
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct drm_file *file;
	int i;

	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;

		seq_printf(m, "proc: %s\n",
			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	int ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (INTEL_INFO(dev)->gen >= 8)
		gen8_ppgtt_info(m, dev);
	else if (INTEL_INFO(dev)->gen >= 6)
		gen6_ppgtt_info(m, dev);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);

	return 0;
}
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 psrperf = 0;
	bool enabled = false;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);
	seq_printf(m, "Re-enable work scheduled: %s\n",
		   yesno(work_busy(&dev_priv->psr.work.work)));

	enabled = HAS_PSR(dev) &&
		I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));

	if (HAS_PSR(dev))
		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
			EDP_PSR_PERF_CNT_MASK;
	seq_printf(m, "Performance_Counter: %u\n", psrperf);
	mutex_unlock(&dev_priv->psr.lock);

	intel_runtime_pm_put(dev_priv);
	return 0;
}
static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct intel_dp *intel_dp = NULL;
	int ret;
	u8 crc[6];

	drm_modeset_lock_all(dev);
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {

		if (connector->base.dpms != DRM_MODE_DPMS_ON)
			continue;

		if (!connector->base.encoder)
			continue;

		encoder = to_intel_encoder(connector->base.encoder);
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		ret = intel_dp_sink_crc(intel_dp, crc);
		if (ret)
			goto out;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;
	}
	ret = -ENODEV;
out:
	drm_modeset_unlock_all(dev);
	return ret;
}
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 power;
	u32 units;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", (long long unsigned)power);

	return 0;
}
static int i915_pc8_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));

	return 0;
}
static const char *power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_PORT_DDI_A_2_LANES:
		return "PORT_DDI_A_2_LANES";
	case POWER_DOMAIN_PORT_DDI_A_4_LANES:
		return "PORT_DDI_A_4_LANES";
	case POWER_DOMAIN_PORT_DDI_B_2_LANES:
		return "PORT_DDI_B_2_LANES";
	case POWER_DOMAIN_PORT_DDI_B_4_LANES:
		return "PORT_DDI_B_4_LANES";
	case POWER_DOMAIN_PORT_DDI_C_2_LANES:
		return "PORT_DDI_C_2_LANES";
	case POWER_DOMAIN_PORT_DDI_C_4_LANES:
		return "PORT_DDI_C_4_LANES";
	case POWER_DOMAIN_PORT_DDI_D_2_LANES:
		return "PORT_DDI_D_2_LANES";
	case POWER_DOMAIN_PORT_DDI_D_4_LANES:
		return "PORT_DDI_D_4_LANES";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	default:
		WARN_ON(1);
		return "?";
	}
}
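/*
 * Dumps every power well with its use count, and under each well the
 * display power domains it feeds along with their per-domain use counts.
 */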
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->name,
			   power_well->count);

		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
		     power_domain++) {
			if (!(BIT(power_domain) & power_well->domains))
				continue;

			seq_printf(m, "  %-23s %d\n",
				   power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
		}
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
		   mode->base.id, mode->name,
		   mode->vrefresh, mode->clock,
		   mode->hdisplay, mode->hsync_start,
		   mode->hsync_end, mode->htotal,
		   mode->vdisplay, mode->vsync_start,
		   mode->vsync_end, mode->vtotal,
		   mode->type, mode->flags);
}
static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *intel_crtc,
			       struct intel_encoder *intel_encoder)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_connector *intel_connector;
	struct drm_encoder *encoder;

	encoder = &intel_encoder->base;
	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
		   encoder->base.id, encoder->name);
	for_each_connector_on_encoder(dev, encoder, intel_connector) {
		struct drm_connector *connector = &intel_connector->base;
		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
			   connector->base.id,
			   connector->name,
			   drm_get_connector_status_name(connector->status));
		if (connector->status == connector_status_connected) {
			struct drm_display_mode *mode = &crtc->mode;
			seq_printf(m, ", mode:\n");
			intel_seq_print_mode(m, 2, mode);
		} else {
			seq_putc(m, '\n');
		}
	}
}
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_crtc *crtc = &intel_crtc->base;
	struct intel_encoder *intel_encoder;

	if (crtc->primary->fb)
		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
			   crtc->primary->fb->base.id, crtc->x, crtc->y,
			   crtc->primary->fb->width, crtc->primary->fb->height);
	else
		seq_puts(m, "\tprimary plane disabled\n");
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		intel_encoder_info(m, intel_crtc, intel_encoder);
}

static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode:\n");
	intel_seq_print_mode(m, 2, mode);
}
2170 static void intel_dp_info(struct seq_file *m,
2171 struct intel_connector *intel_connector)
2173 struct intel_encoder *intel_encoder = intel_connector->encoder;
2174 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2176 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2177 seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
2178 "no");
2179 if (intel_encoder->type == INTEL_OUTPUT_EDP)
2180 intel_panel_info(m, &intel_connector->panel);
2183 static void intel_hdmi_info(struct seq_file *m,
2184 struct intel_connector *intel_connector)
2186 struct intel_encoder *intel_encoder = intel_connector->encoder;
2187 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2189 seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
2190 "no");
2193 static void intel_lvds_info(struct seq_file *m,
2194 struct intel_connector *intel_connector)
2196 intel_panel_info(m, &intel_connector->panel);
2199 static void intel_connector_info(struct seq_file *m,
2200 struct drm_connector *connector)
2202 struct intel_connector *intel_connector = to_intel_connector(connector);
2203 struct intel_encoder *intel_encoder = intel_connector->encoder;
2204 struct drm_display_mode *mode;
2206 seq_printf(m, "connector %d: type %s, status: %s\n",
2207 connector->base.id, connector->name,
2208 drm_get_connector_status_name(connector->status));
2209 if (connector->status == connector_status_connected) {
2210 seq_printf(m, "\tname: %s\n", connector->display_info.name);
2211 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2212 connector->display_info.width_mm,
2213 connector->display_info.height_mm);
2214 seq_printf(m, "\tsubpixel order: %s\n",
2215 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2216 seq_printf(m, "\tCEA rev: %d\n",
2217 connector->display_info.cea_rev);
2219 if (intel_encoder) {
2220 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2221 intel_encoder->type == INTEL_OUTPUT_EDP)
2222 intel_dp_info(m, intel_connector);
2223 else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
2224 intel_hdmi_info(m, intel_connector);
2225 else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2226 intel_lvds_info(m, intel_connector);
2229 seq_printf(m, "\tmodes:\n");
2230 list_for_each_entry(mode, &connector->modes, head)
2231 intel_seq_print_mode(m, 2, mode);
2234 static bool cursor_active(struct drm_device *dev, int pipe)
2236 struct drm_i915_private *dev_priv = dev->dev_private;
2239 if (IS_845G(dev) || IS_I865G(dev))
2240 state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
2241 else
2242 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
2247 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
2249 struct drm_i915_private *dev_priv = dev->dev_private;
2252 pos = I915_READ(CURPOS(pipe));
2254 *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
2255 if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
2256 *x = -*x;
2258 *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
2259 if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
2260 *y = -*y;
2262 return cursor_active(dev, pipe);
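/*
 * A worked example of the sign-magnitude decode above, assuming the
 * usual CURPOS layout (magnitude in the low bits of each 16-bit half,
 * sign in that half's CURSOR_POS_SIGN bit): pos = 0x80100020 yields
 * x = 0x020 = 32, and y = 0x010 with the Y sign bit set, i.e. y = -16.
 */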
2265 static int i915_display_info(struct seq_file *m, void *unused)
2267 struct drm_info_node *node = m->private;
2268 struct drm_device *dev = node->minor->dev;
2269 struct drm_i915_private *dev_priv = dev->dev_private;
2270 struct intel_crtc *crtc;
2271 struct drm_connector *connector;
2273 intel_runtime_pm_get(dev_priv);
2274 drm_modeset_lock_all(dev);
2275 seq_printf(m, "CRTC info\n");
2276 seq_printf(m, "---------\n");
2277 for_each_intel_crtc(dev, crtc) {
2281 seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
2282 crtc->base.base.id, pipe_name(crtc->pipe),
2283 yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h);
2285 intel_crtc_info(m, crtc);
2287 active = cursor_position(dev, crtc->pipe, &x, &y);
2288 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
2289 yesno(crtc->cursor_base),
2290 x, y, crtc->cursor_width, crtc->cursor_height,
2291 crtc->cursor_addr, yesno(active));
2294 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
2295 yesno(!crtc->cpu_fifo_underrun_disabled),
2296 yesno(!crtc->pch_fifo_underrun_disabled));
2299 seq_printf(m, "\n");
2300 seq_printf(m, "Connector info\n");
2301 seq_printf(m, "--------------\n");
2302 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2303 intel_connector_info(m, connector);
2305 drm_modeset_unlock_all(dev);
2306 intel_runtime_pm_put(dev_priv);
2311 static int i915_semaphore_status(struct seq_file *m, void *unused)
2313 struct drm_info_node *node = (struct drm_info_node *) m->private;
2314 struct drm_device *dev = node->minor->dev;
2315 struct drm_i915_private *dev_priv = dev->dev_private;
2316 struct intel_engine_cs *ring;
2317 int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
2320 if (!i915_semaphore_is_enabled(dev)) {
2321 seq_puts(m, "Semaphores are disabled\n");
2322 return 0;
2325 ret = mutex_lock_interruptible(&dev->struct_mutex);
2328 intel_runtime_pm_get(dev_priv);
2330 if (IS_BROADWELL(dev)) {
2334 page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
2336 seqno = (uint64_t *)kmap_atomic(page);
2337 for_each_ring(ring, dev_priv, i) {
2340 seq_printf(m, "%s\n", ring->name);
2342 seq_puts(m, " Last signal:");
2343 for (j = 0; j < num_rings; j++) {
2344 offset = i * I915_NUM_RINGS + j;
2345 seq_printf(m, "0x%08llx (0x%02llx) ",
2346 seqno[offset], offset * 8);
2350 seq_puts(m, " Last wait: ");
2351 for (j = 0; j < num_rings; j++) {
2352 offset = i + (j * I915_NUM_RINGS);
2353 seq_printf(m, "0x%08llx (0x%02llx) ",
2354 seqno[offset], offset * 8);
2359 kunmap_atomic(seqno);
2360 } else {
2361 seq_puts(m, " Last signal:");
2362 for_each_ring(ring, dev_priv, i)
2363 for (j = 0; j < num_rings; j++)
2364 seq_printf(m, "0x%08x\n",
2365 I915_READ(ring->semaphore.mbox.signal[j]));
2369 seq_puts(m, "\nSync seqno:\n");
2370 for_each_ring(ring, dev_priv, i) {
2371 for (j = 0; j < num_rings; j++) {
2372 seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]);
2378 intel_runtime_pm_put(dev_priv);
2379 mutex_unlock(&dev->struct_mutex);
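/*
 * Layout note for the Broadwell path above: the semaphore page is read
 * as a flat array of u64 seqnos indexed ring-pair-wise, so the
 * "Last signal" pass walks row i (offset = i * I915_NUM_RINGS + j) and
 * the "Last wait" pass walks the transposed column of the same matrix
 * (offset = i + j * I915_NUM_RINGS).
 */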
2383 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2385 struct drm_info_node *node = (struct drm_info_node *) m->private;
2386 struct drm_device *dev = node->minor->dev;
2387 struct drm_i915_private *dev_priv = dev->dev_private;
2390 drm_modeset_lock_all(dev);
2391 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2392 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2394 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
2395 seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount,
2396 pll->active, yesno(pll->on));
2397 seq_printf(m, " tracked hardware state:\n");
2398 seq_printf(m, " dpll: 0x%08x\n", pll->hw_state.dpll);
2399 seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md);
2400 seq_printf(m, " fp0: 0x%08x\n", pll->hw_state.fp0);
2401 seq_printf(m, " fp1: 0x%08x\n", pll->hw_state.fp1);
2402 seq_printf(m, " wrpll: 0x%08x\n", pll->hw_state.wrpll);
2404 drm_modeset_unlock_all(dev);
2409 struct pipe_crc_info {
2410 const char *name;
2411 struct drm_device *dev;
2412 enum pipe pipe;
2415 static int i915_dp_mst_info(struct seq_file *m, void *unused)
2417 struct drm_info_node *node = (struct drm_info_node *) m->private;
2418 struct drm_device *dev = node->minor->dev;
2419 struct drm_encoder *encoder;
2420 struct intel_encoder *intel_encoder;
2421 struct intel_digital_port *intel_dig_port;
2422 drm_modeset_lock_all(dev);
2423 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2424 intel_encoder = to_intel_encoder(encoder);
2425 if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
2426 continue;
2427 intel_dig_port = enc_to_dig_port(encoder);
2428 if (!intel_dig_port->dp.can_mst)
2429 continue;
2431 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
2433 drm_modeset_unlock_all(dev);
2437 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
2439 struct pipe_crc_info *info = inode->i_private;
2440 struct drm_i915_private *dev_priv = info->dev->dev_private;
2441 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
2443 if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
2444 return -ENODEV;
2446 spin_lock_irq(&pipe_crc->lock);
2448 if (pipe_crc->opened) {
2449 spin_unlock_irq(&pipe_crc->lock);
2450 return -EBUSY; /* already open */
2453 pipe_crc->opened = true;
2454 filep->private_data = inode->i_private;
2456 spin_unlock_irq(&pipe_crc->lock);
2461 static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
2463 struct pipe_crc_info *info = inode->i_private;
2464 struct drm_i915_private *dev_priv = info->dev->dev_private;
2465 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
2467 spin_lock_irq(&pipe_crc->lock);
2468 pipe_crc->opened = false;
2469 spin_unlock_irq(&pipe_crc->lock);
2474 /* (6 fields, 8 chars each, space separated (5) + '\n') */
2475 #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
2476 /* account for '\0' */
2477 #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
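/*
 * Worked example of the arithmetic above: one record is printed as
 * "%8u %8x %8x %8x %8x %8x\n" (see i915_pipe_crc_read() below), i.e.
 * 6 * 8 = 48 payload characters + 5 separating spaces + '\n' = 54
 * bytes, with one extra byte in the buffer for snprintf's terminating
 * NUL. A sample line (values made up):
 *
 * "    4919 89abcdef 00000000 12345678 deadbeef 00000001\n"
 */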
2479 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
2481 assert_spin_locked(&pipe_crc->lock);
2482 return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
2483 INTEL_PIPE_CRC_ENTRIES_NR);
2487 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
2490 struct pipe_crc_info *info = filep->private_data;
2491 struct drm_device *dev = info->dev;
2492 struct drm_i915_private *dev_priv = dev->dev_private;
2493 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
2494 char buf[PIPE_CRC_BUFFER_LEN];
2495 int head, tail, n_entries, n;
2499 * Don't allow user space to provide buffers not big enough to hold
2500 * a line of data.
2502 if (count < PIPE_CRC_LINE_LEN)
2503 return -EINVAL;
2505 if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
2506 return 0;
2508 /* nothing to read */
2509 spin_lock_irq(&pipe_crc->lock);
2510 while (pipe_crc_data_count(pipe_crc) == 0) {
2513 if (filep->f_flags & O_NONBLOCK) {
2514 spin_unlock_irq(&pipe_crc->lock);
2515 return -EAGAIN;
2518 ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
2519 pipe_crc_data_count(pipe_crc), pipe_crc->lock);
2520 if (ret) {
2521 spin_unlock_irq(&pipe_crc->lock);
2522 return ret;
2526 /* We now have one or more entries to read */
2527 head = pipe_crc->head;
2528 tail = pipe_crc->tail;
2529 n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
2530 count / PIPE_CRC_LINE_LEN);
2531 spin_unlock_irq(&pipe_crc->lock);
2535 do {
2536 struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
2539 bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
2540 "%8u %8x %8x %8x %8x %8x\n",
2541 entry->frame, entry->crc[0],
2542 entry->crc[1], entry->crc[2],
2543 entry->crc[3], entry->crc[4]);
2545 ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
2546 buf, PIPE_CRC_LINE_LEN);
2547 if (ret == PIPE_CRC_LINE_LEN)
2548 return -EFAULT;
2550 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
2551 tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
2553 } while (--n_entries);
2555 spin_lock_irq(&pipe_crc->lock);
2556 pipe_crc->tail = tail;
2557 spin_unlock_irq(&pipe_crc->lock);
2562 static const struct file_operations i915_pipe_crc_fops = {
2563 .owner = THIS_MODULE,
2564 .open = i915_pipe_crc_open,
2565 .read = i915_pipe_crc_read,
2566 .release = i915_pipe_crc_release,
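/*
 * A minimal userspace consumer sketch for the fops above (not driver
 * code; the debugfs path is an assumption and depends on the mount
 * point and DRM minor). Each read yields whole fixed-width records in
 * the format documented at PIPE_CRC_LINE_LEN:
 *
 * #include <stdio.h>
 *
 * int main(void)
 * {
 *     unsigned int frame, crc[5];
 *     FILE *f = fopen("/sys/kernel/debug/dri/0/i915_pipe_A_crc", "r");
 *
 *     if (!f)
 *         return 1;
 *     while (fscanf(f, "%u %x %x %x %x %x", &frame, &crc[0],
 *                   &crc[1], &crc[2], &crc[3], &crc[4]) == 6)
 *         printf("frame %u crc %08x%08x%08x%08x%08x\n", frame,
 *                crc[0], crc[1], crc[2], crc[3], crc[4]);
 *     fclose(f);
 *     return 0;
 * }
 */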
2569 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
2571 .name = "i915_pipe_A_crc",
2575 .name = "i915_pipe_B_crc",
2579 .name = "i915_pipe_C_crc",
2584 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
2587 struct drm_device *dev = minor->dev;
2589 struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
2592 ent = debugfs_create_file(info->name, S_IRUGO, root, info,
2593 &i915_pipe_crc_fops);
2597 return drm_add_fake_info_node(minor, ent, info);
2600 static const char * const pipe_crc_sources[] = {
2613 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
2615 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
2616 return pipe_crc_sources[source];
2619 static int display_crc_ctl_show(struct seq_file *m, void *data)
2621 struct drm_device *dev = m->private;
2622 struct drm_i915_private *dev_priv = dev->dev_private;
2625 for (i = 0; i < I915_MAX_PIPES; i++)
2626 seq_printf(m, "%c %s\n", pipe_name(i),
2627 pipe_crc_source_name(dev_priv->pipe_crc[i].source));
2632 static int display_crc_ctl_open(struct inode *inode, struct file *file)
2634 struct drm_device *dev = inode->i_private;
2636 return single_open(file, display_crc_ctl_show, dev);
2639 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
2642 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2643 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2646 case INTEL_PIPE_CRC_SOURCE_PIPE:
2647 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
2649 case INTEL_PIPE_CRC_SOURCE_NONE:
2659 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
2660 enum intel_pipe_crc_source *source)
2662 struct intel_encoder *encoder;
2663 struct intel_crtc *crtc;
2664 struct intel_digital_port *dig_port;
2667 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2669 drm_modeset_lock_all(dev);
2670 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2672 if (!encoder->base.crtc)
2673 continue;
2675 crtc = to_intel_crtc(encoder->base.crtc);
2677 if (crtc->pipe != pipe)
2678 continue;
2680 switch (encoder->type) {
2681 case INTEL_OUTPUT_TVOUT:
2682 *source = INTEL_PIPE_CRC_SOURCE_TV;
2684 case INTEL_OUTPUT_DISPLAYPORT:
2685 case INTEL_OUTPUT_EDP:
2686 dig_port = enc_to_dig_port(&encoder->base);
2687 switch (dig_port->port) {
2688 case PORT_B:
2689 *source = INTEL_PIPE_CRC_SOURCE_DP_B;
2690 break;
2691 case PORT_C:
2692 *source = INTEL_PIPE_CRC_SOURCE_DP_C;
2693 break;
2694 case PORT_D:
2695 *source = INTEL_PIPE_CRC_SOURCE_DP_D;
2696 break;
2697 default:
2698 WARN(1, "nonexisting DP port %c\n",
2699 port_name(dig_port->port));
2705 drm_modeset_unlock_all(dev);
2710 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
2712 enum intel_pipe_crc_source *source,
2715 struct drm_i915_private *dev_priv = dev->dev_private;
2716 bool need_stable_symbols = false;
2718 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
2719 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
2725 case INTEL_PIPE_CRC_SOURCE_PIPE:
2726 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
2728 case INTEL_PIPE_CRC_SOURCE_DP_B:
2729 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
2730 need_stable_symbols = true;
2732 case INTEL_PIPE_CRC_SOURCE_DP_C:
2733 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
2734 need_stable_symbols = true;
2736 case INTEL_PIPE_CRC_SOURCE_NONE:
2744 * When the pipe CRC tap point is after the transcoders we need
2745 * to tweak symbol-level features to produce a deterministic series of
2746 * symbols for a given frame. We need to reset those features only once
2747 * a frame (instead of every nth symbol):
2748 * - DC-balance: used to ensure a better clock recovery from the data
2749 * link
2750 * - DisplayPort scrambling: used for EMI reduction
2752 if (need_stable_symbols) {
2753 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2755 tmp |= DC_BALANCE_RESET_VLV;
2756 if (pipe == PIPE_A)
2757 tmp |= PIPE_A_SCRAMBLE_RESET;
2758 else
2759 tmp |= PIPE_B_SCRAMBLE_RESET;
2761 I915_WRITE(PORT_DFT2_G4X, tmp);
2767 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
2769 enum intel_pipe_crc_source *source,
2772 struct drm_i915_private *dev_priv = dev->dev_private;
2773 bool need_stable_symbols = false;
2775 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
2776 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
2782 case INTEL_PIPE_CRC_SOURCE_PIPE:
2783 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
2785 case INTEL_PIPE_CRC_SOURCE_TV:
2786 if (!SUPPORTS_TV(dev))
2787 return -EINVAL;
2788 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
2790 case INTEL_PIPE_CRC_SOURCE_DP_B:
2791 if (!IS_G4X(dev))
2792 return -EINVAL;
2793 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
2794 need_stable_symbols = true;
2796 case INTEL_PIPE_CRC_SOURCE_DP_C:
2797 if (!IS_G4X(dev))
2798 return -EINVAL;
2799 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
2800 need_stable_symbols = true;
2802 case INTEL_PIPE_CRC_SOURCE_DP_D:
2803 if (!IS_G4X(dev))
2804 return -EINVAL;
2805 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
2806 need_stable_symbols = true;
2808 case INTEL_PIPE_CRC_SOURCE_NONE:
2816 * When the pipe CRC tap point is after the transcoders we need
2817 * to tweak symbol-level features to produce a deterministic series of
2818 * symbols for a given frame. We need to reset those features only once
2819 * a frame (instead of every nth symbol):
2820 * - DC-balance: used to ensure a better clock recovery from the data
2821 * link
2822 * - DisplayPort scrambling: used for EMI reduction
2824 if (need_stable_symbols) {
2825 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2827 WARN_ON(!IS_G4X(dev));
2829 I915_WRITE(PORT_DFT_I9XX,
2830 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
2832 if (pipe == PIPE_A)
2833 tmp |= PIPE_A_SCRAMBLE_RESET;
2834 else
2835 tmp |= PIPE_B_SCRAMBLE_RESET;
2837 I915_WRITE(PORT_DFT2_G4X, tmp);
2843 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
2846 struct drm_i915_private *dev_priv = dev->dev_private;
2847 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2849 if (pipe == PIPE_A)
2850 tmp &= ~PIPE_A_SCRAMBLE_RESET;
2851 else
2852 tmp &= ~PIPE_B_SCRAMBLE_RESET;
2853 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
2854 tmp &= ~DC_BALANCE_RESET_VLV;
2855 I915_WRITE(PORT_DFT2_G4X, tmp);
2859 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
2862 struct drm_i915_private *dev_priv = dev->dev_private;
2863 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2865 if (pipe == PIPE_A)
2866 tmp &= ~PIPE_A_SCRAMBLE_RESET;
2867 else
2868 tmp &= ~PIPE_B_SCRAMBLE_RESET;
2869 I915_WRITE(PORT_DFT2_G4X, tmp);
2871 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
2872 I915_WRITE(PORT_DFT_I9XX,
2873 I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
2877 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
2880 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2881 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2884 case INTEL_PIPE_CRC_SOURCE_PLANE1:
2885 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
2887 case INTEL_PIPE_CRC_SOURCE_PLANE2:
2888 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
2890 case INTEL_PIPE_CRC_SOURCE_PIPE:
2891 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
2893 case INTEL_PIPE_CRC_SOURCE_NONE:
2903 static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
2905 struct drm_i915_private *dev_priv = dev->dev_private;
2906 struct intel_crtc *crtc =
2907 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
2909 drm_modeset_lock_all(dev);
2911 * If we use the eDP transcoder we need to make sure that we don't
2912 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
2913 * relevant on hsw with pipe A when using the always-on power well
2914 * routing.
2916 if (crtc->config.cpu_transcoder == TRANSCODER_EDP &&
2917 !crtc->config.pch_pfit.enabled) {
2918 crtc->config.pch_pfit.force_thru = true;
2920 intel_display_power_get(dev_priv,
2921 POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
2923 dev_priv->display.crtc_disable(&crtc->base);
2924 dev_priv->display.crtc_enable(&crtc->base);
2926 drm_modeset_unlock_all(dev);
2929 static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
2931 struct drm_i915_private *dev_priv = dev->dev_private;
2932 struct intel_crtc *crtc =
2933 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
2935 drm_modeset_lock_all(dev);
2937 * If we use the eDP transcoder we need to make sure that we don't
2938 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
2939 * relevant on hsw with pipe A when using the always-on power well
2940 * routing.
2942 if (crtc->config.pch_pfit.force_thru) {
2943 crtc->config.pch_pfit.force_thru = false;
2945 dev_priv->display.crtc_disable(&crtc->base);
2946 dev_priv->display.crtc_enable(&crtc->base);
2948 intel_display_power_put(dev_priv,
2949 POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
2951 drm_modeset_unlock_all(dev);
2954 static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
2956 enum intel_pipe_crc_source *source,
2959 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2960 *source = INTEL_PIPE_CRC_SOURCE_PF;
2963 case INTEL_PIPE_CRC_SOURCE_PLANE1:
2964 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
2966 case INTEL_PIPE_CRC_SOURCE_PLANE2:
2967 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
2969 case INTEL_PIPE_CRC_SOURCE_PF:
2970 if (IS_HASWELL(dev) && pipe == PIPE_A)
2971 hsw_trans_edp_pipe_A_crc_wa(dev);
2973 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
2975 case INTEL_PIPE_CRC_SOURCE_NONE:
2985 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
2986 enum intel_pipe_crc_source source)
2988 struct drm_i915_private *dev_priv = dev->dev_private;
2989 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
2990 u32 val = 0; /* shut up gcc */
2993 if (pipe_crc->source == source)
2994 return 0;
2996 /* forbid changing the source without going back to 'none' */
2997 if (pipe_crc->source && source)
2998 return -EINVAL;
3000 if (IS_GEN2(dev))
3001 ret = i8xx_pipe_crc_ctl_reg(&source, &val);
3002 else if (INTEL_INFO(dev)->gen < 5)
3003 ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
3004 else if (IS_VALLEYVIEW(dev))
3005 ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
3006 else if (IS_GEN5(dev) || IS_GEN6(dev))
3007 ret = ilk_pipe_crc_ctl_reg(&source, &val);
3009 ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
3014 /* none -> real source transition */
3015 if (source) {
3016 DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
3017 pipe_name(pipe), pipe_crc_source_name(source));
3019 pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
3020 INTEL_PIPE_CRC_ENTRIES_NR,
3021 GFP_KERNEL);
3022 if (!pipe_crc->entries)
3023 return -ENOMEM;
3025 spin_lock_irq(&pipe_crc->lock);
3026 pipe_crc->head = 0;
3027 pipe_crc->tail = 0;
3028 spin_unlock_irq(&pipe_crc->lock);
3031 pipe_crc->source = source;
3033 I915_WRITE(PIPE_CRC_CTL(pipe), val);
3034 POSTING_READ(PIPE_CRC_CTL(pipe));
3036 /* real source -> none transition */
3037 if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
3038 struct intel_pipe_crc_entry *entries;
3039 struct intel_crtc *crtc =
3040 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
3042 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
3045 drm_modeset_lock(&crtc->base.mutex, NULL);
3046 if (crtc->active)
3047 intel_wait_for_vblank(dev, pipe);
3048 drm_modeset_unlock(&crtc->base.mutex);
3050 spin_lock_irq(&pipe_crc->lock);
3051 entries = pipe_crc->entries;
3052 pipe_crc->entries = NULL;
3053 spin_unlock_irq(&pipe_crc->lock);
3055 kfree(entries);
3057 if (IS_G4X(dev))
3058 g4x_undo_pipe_scramble_reset(dev, pipe);
3059 else if (IS_VALLEYVIEW(dev))
3060 vlv_undo_pipe_scramble_reset(dev, pipe);
3061 else if (IS_HASWELL(dev) && pipe == PIPE_A)
3062 hsw_undo_trans_edp_pipe_A_crc_wa(dev);
3069 * Parse pipe CRC command strings:
3070 * command: wsp* object wsp+ name wsp+ source wsp*
3071 * object: 'pipe'
3072 * name: (A | B | C)
3073 * source: (none | plane1 | plane2 | pf)
3074 * wsp: (#0x20 | #0x9 | #0xA)+
3077 * "pipe A plane1" -> Start CRC computations on plane1 of pipe A
3078 * "pipe A none" -> Stop CRC
3080 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
3087 /* skip leading white space */
3088 buf = skip_spaces(buf);
3089 if (!*buf)
3090 break; /* end of buffer */
3092 /* find end of word */
3093 for (end = buf; *end && !isspace(*end); end++)
3096 if (n_words == max_words) {
3097 DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
3099 return -EINVAL; /* ran out of words[] before bytes */
3104 words[n_words++] = buf;
3111 enum intel_pipe_crc_object {
3112 PIPE_CRC_OBJECT_PIPE,
3115 static const char * const pipe_crc_objects[] = {
3116 "pipe",
3120 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
3124 for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
3125 if (!strcmp(buf, pipe_crc_objects[i])) {
3126 *o = i;
3127 return 0;
3133 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
3135 const char name = buf[0];
3137 if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
3138 return -EINVAL;
3140 *pipe = name - 'A';
3146 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
3150 for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
3151 if (!strcmp(buf, pipe_crc_sources[i])) {
3152 *s = i;
3153 return 0;
3159 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
3161 #define N_WORDS 3
3163 char *words[N_WORDS];
3165 enum intel_pipe_crc_object object;
3166 enum intel_pipe_crc_source source;
3168 n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
3169 if (n_words != N_WORDS) {
3170 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
3175 if (display_crc_ctl_parse_object(words[0], &object) < 0) {
3176 DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
3180 if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
3181 DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
3185 if (display_crc_ctl_parse_source(words[2], &source) < 0) {
3186 DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
3190 return pipe_crc_set_source(dev, pipe, source);
3193 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
3194 size_t len, loff_t *offp)
3196 struct seq_file *m = file->private_data;
3197 struct drm_device *dev = m->private;
3204 if (len > PAGE_SIZE - 1) {
3205 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
3206 PAGE_SIZE);
3207 return -E2BIG;
3210 tmpbuf = kmalloc(len + 1, GFP_KERNEL);
3211 if (!tmpbuf)
3212 return -ENOMEM;
3214 if (copy_from_user(tmpbuf, ubuf, len)) {
3215 ret = -EFAULT;
3216 goto out;
3218 tmpbuf[len] = '\0';
3220 ret = display_crc_ctl_parse(dev, tmpbuf, len);
3231 static const struct file_operations i915_display_crc_ctl_fops = {
3232 .owner = THIS_MODULE,
3233 .open = display_crc_ctl_open,
3235 .llseek = seq_lseek,
3236 .release = single_release,
3237 .write = display_crc_ctl_write
3240 static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
3242 struct drm_device *dev = m->private;
3243 int num_levels = ilk_wm_max_level(dev) + 1;
3246 drm_modeset_lock_all(dev);
3248 for (level = 0; level < num_levels; level++) {
3249 unsigned int latency = wm[level];
3251 /* WM1+ latency values in 0.5us units */
3252 if (level > 0)
3253 latency *= 5;
3255 seq_printf(m, "WM%d %u (%u.%u usec)\n",
3256 level, latency,
3257 latency / 10, latency % 10);
3260 drm_modeset_unlock_all(dev);
3263 static int pri_wm_latency_show(struct seq_file *m, void *data)
3265 struct drm_device *dev = m->private;
3267 wm_latency_show(m, to_i915(dev)->wm.pri_latency);
3272 static int spr_wm_latency_show(struct seq_file *m, void *data)
3274 struct drm_device *dev = m->private;
3276 wm_latency_show(m, to_i915(dev)->wm.spr_latency);
3281 static int cur_wm_latency_show(struct seq_file *m, void *data)
3283 struct drm_device *dev = m->private;
3285 wm_latency_show(m, to_i915(dev)->wm.cur_latency);
3290 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3292 struct drm_device *dev = inode->i_private;
3294 if (HAS_GMCH_DISPLAY(dev))
3295 return -ENODEV;
3297 return single_open(file, pri_wm_latency_show, dev);
3300 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3302 struct drm_device *dev = inode->i_private;
3304 if (HAS_GMCH_DISPLAY(dev))
3305 return -ENODEV;
3307 return single_open(file, spr_wm_latency_show, dev);
3310 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3312 struct drm_device *dev = inode->i_private;
3314 if (HAS_GMCH_DISPLAY(dev))
3315 return -ENODEV;
3317 return single_open(file, cur_wm_latency_show, dev);
3320 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3321 size_t len, loff_t *offp, uint16_t wm[5])
3323 struct seq_file *m = file->private_data;
3324 struct drm_device *dev = m->private;
3325 uint16_t new[5] = { 0 };
3326 int num_levels = ilk_wm_max_level(dev) + 1;
3331 if (len >= sizeof(tmp))
3332 return -EINVAL;
3334 if (copy_from_user(tmp, ubuf, len))
3335 return -EFAULT;
3337 tmp[len] = '\0';
3339 ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
3340 if (ret != num_levels)
3341 return -EINVAL;
3343 drm_modeset_lock_all(dev);
3345 for (level = 0; level < num_levels; level++)
3346 wm[level] = new[level];
3348 drm_modeset_unlock_all(dev);
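/*
 * A usage sketch (path assumed, values made up): the handler above
 * expects one decimal latency per watermark level, e.g. on a part with
 * five levels
 *
 * # echo "200 500 1000 2000 3000" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 *
 * and it rejects the write unless exactly ilk_wm_max_level(dev) + 1
 * values are parsed.
 */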
3354 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3355 size_t len, loff_t *offp)
3357 struct seq_file *m = file->private_data;
3358 struct drm_device *dev = m->private;
3360 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
3363 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3364 size_t len, loff_t *offp)
3366 struct seq_file *m = file->private_data;
3367 struct drm_device *dev = m->private;
3369 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
3372 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3373 size_t len, loff_t *offp)
3375 struct seq_file *m = file->private_data;
3376 struct drm_device *dev = m->private;
3378 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
3381 static const struct file_operations i915_pri_wm_latency_fops = {
3382 .owner = THIS_MODULE,
3383 .open = pri_wm_latency_open,
3385 .llseek = seq_lseek,
3386 .release = single_release,
3387 .write = pri_wm_latency_write
3390 static const struct file_operations i915_spr_wm_latency_fops = {
3391 .owner = THIS_MODULE,
3392 .open = spr_wm_latency_open,
3394 .llseek = seq_lseek,
3395 .release = single_release,
3396 .write = spr_wm_latency_write
3399 static const struct file_operations i915_cur_wm_latency_fops = {
3400 .owner = THIS_MODULE,
3401 .open = cur_wm_latency_open,
3403 .llseek = seq_lseek,
3404 .release = single_release,
3405 .write = cur_wm_latency_write
3409 i915_wedged_get(void *data, u64 *val)
3411 struct drm_device *dev = data;
3412 struct drm_i915_private *dev_priv = dev->dev_private;
3414 *val = atomic_read(&dev_priv->gpu_error.reset_counter);
3420 i915_wedged_set(void *data, u64 val)
3422 struct drm_device *dev = data;
3423 struct drm_i915_private *dev_priv = dev->dev_private;
3425 intel_runtime_pm_get(dev_priv);
3427 i915_handle_error(dev, val,
3428 "Manually setting wedged to %llu", val);
3430 intel_runtime_pm_put(dev_priv);
3435 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3436 i915_wedged_get, i915_wedged_set,
3437 "%llu\n");
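/*
 * A usage sketch (path assumed): writing to this file injects a hang,
 * e.g.
 *
 * # echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 *
 * which funnels into i915_handle_error() as if the GPU had actually
 * hung, so error capture and reset handling can be exercised on demand.
 */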
3440 i915_ring_stop_get(void *data, u64 *val)
3442 struct drm_device *dev = data;
3443 struct drm_i915_private *dev_priv = dev->dev_private;
3445 *val = dev_priv->gpu_error.stop_rings;
3451 i915_ring_stop_set(void *data, u64 val)
3453 struct drm_device *dev = data;
3454 struct drm_i915_private *dev_priv = dev->dev_private;
3457 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
3459 ret = mutex_lock_interruptible(&dev->struct_mutex);
3463 dev_priv->gpu_error.stop_rings = val;
3464 mutex_unlock(&dev->struct_mutex);
3469 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
3470 i915_ring_stop_get, i915_ring_stop_set,
3474 i915_ring_missed_irq_get(void *data, u64 *val)
3476 struct drm_device *dev = data;
3477 struct drm_i915_private *dev_priv = dev->dev_private;
3479 *val = dev_priv->gpu_error.missed_irq_rings;
3484 i915_ring_missed_irq_set(void *data, u64 val)
3486 struct drm_device *dev = data;
3487 struct drm_i915_private *dev_priv = dev->dev_private;
3490 /* Lock against concurrent debugfs callers */
3491 ret = mutex_lock_interruptible(&dev->struct_mutex);
3494 dev_priv->gpu_error.missed_irq_rings = val;
3495 mutex_unlock(&dev->struct_mutex);
3500 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
3501 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
3505 i915_ring_test_irq_get(void *data, u64 *val)
3507 struct drm_device *dev = data;
3508 struct drm_i915_private *dev_priv = dev->dev_private;
3510 *val = dev_priv->gpu_error.test_irq_rings;
3516 i915_ring_test_irq_set(void *data, u64 val)
3518 struct drm_device *dev = data;
3519 struct drm_i915_private *dev_priv = dev->dev_private;
3522 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
3524 /* Lock against concurrent debugfs callers */
3525 ret = mutex_lock_interruptible(&dev->struct_mutex);
3529 dev_priv->gpu_error.test_irq_rings = val;
3530 mutex_unlock(&dev->struct_mutex);
3535 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
3536 i915_ring_test_irq_get, i915_ring_test_irq_set,
3539 #define DROP_UNBOUND 0x1
3540 #define DROP_BOUND 0x2
3541 #define DROP_RETIRE 0x4
3542 #define DROP_ACTIVE 0x8
3543 #define DROP_ALL (DROP_UNBOUND | \
3544 DROP_BOUND | \
3545 DROP_RETIRE | \
3546 DROP_ACTIVE)
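/*
 * A usage sketch (path assumed): userspace writes an OR-ed mask of the
 * flags above, e.g. dropping everything with
 *
 * # echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 *
 * As i915_drop_caches_set() below shows, DROP_ACTIVE additionally idles
 * the GPU before requests are retired and buffers unbound.
 */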
3548 i915_drop_caches_get(void *data, u64 *val)
3550 *val = DROP_ALL;
3552 return 0;
3556 i915_drop_caches_set(void *data, u64 val)
3558 struct drm_device *dev = data;
3559 struct drm_i915_private *dev_priv = dev->dev_private;
3560 struct drm_i915_gem_object *obj, *next;
3561 struct i915_address_space *vm;
3562 struct i915_vma *vma, *x;
3565 DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
3567 /* No need to check and wait for gpu resets, only libdrm auto-restarts
3568 * on ioctls on -EAGAIN. */
3569 ret = mutex_lock_interruptible(&dev->struct_mutex);
3573 if (val & DROP_ACTIVE) {
3574 ret = i915_gpu_idle(dev);
3575 if (ret)
3576 goto unlock;
3579 if (val & (DROP_RETIRE | DROP_ACTIVE))
3580 i915_gem_retire_requests(dev);
3582 if (val & DROP_BOUND) {
3583 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
3584 list_for_each_entry_safe(vma, x, &vm->inactive_list,
3585 mm_list) {
3586 if (vma->pin_count)
3587 continue;
3589 ret = i915_vma_unbind(vma);
3590 if (ret)
3591 goto unlock;
3596 if (val & DROP_UNBOUND) {
3597 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
3598 global_list) {
3599 if (obj->pages_pin_count == 0) {
3600 ret = i915_gem_object_put_pages(obj);
3601 if (ret)
3602 goto unlock;
3606 unlock:
3607 mutex_unlock(&dev->struct_mutex);
3612 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
3613 i915_drop_caches_get, i915_drop_caches_set,
3617 i915_max_freq_get(void *data, u64 *val)
3619 struct drm_device *dev = data;
3620 struct drm_i915_private *dev_priv = dev->dev_private;
3623 if (INTEL_INFO(dev)->gen < 6)
3624 return -ENODEV;
3626 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3628 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3632 if (IS_VALLEYVIEW(dev))
3633 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
3634 else
3635 *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
3636 mutex_unlock(&dev_priv->rps.hw_lock);
3642 i915_max_freq_set(void *data, u64 val)
3644 struct drm_device *dev = data;
3645 struct drm_i915_private *dev_priv = dev->dev_private;
3646 u32 rp_state_cap, hw_max, hw_min;
3649 if (INTEL_INFO(dev)->gen < 6)
3650 return -ENODEV;
3652 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3654 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
3656 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3661 * Turbo will still be enabled, but won't go above the set value.
3663 if (IS_VALLEYVIEW(dev)) {
3664 val = vlv_freq_opcode(dev_priv, val);
3666 hw_max = dev_priv->rps.max_freq;
3667 hw_min = dev_priv->rps.min_freq;
3668 } else {
3669 do_div(val, GT_FREQUENCY_MULTIPLIER);
3671 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3672 hw_max = dev_priv->rps.max_freq;
3673 hw_min = (rp_state_cap >> 16) & 0xff;
3676 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
3677 mutex_unlock(&dev_priv->rps.hw_lock);
3678 return -EINVAL;
3681 dev_priv->rps.max_freq_softlimit = val;
3683 if (IS_VALLEYVIEW(dev))
3684 valleyview_set_rps(dev, val);
3685 else
3686 gen6_set_rps(dev, val);
3688 mutex_unlock(&dev_priv->rps.hw_lock);
3693 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
3694 i915_max_freq_get, i915_max_freq_set,
3695 "%llu\n");
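/*
 * A worked example of the unit handling above (values illustrative):
 * on non-Valleyview parts the file is in MHz and GT_FREQUENCY_MULTIPLIER
 * is 50, so
 *
 * # echo 1100 > /sys/kernel/debug/dri/0/i915_max_freq
 *
 * stores 1100 / 50 = 22 as the RP softlimit; values outside the
 * hardware limits or below the current min softlimit are rejected.
 */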
3698 i915_min_freq_get(void *data, u64 *val)
3700 struct drm_device *dev = data;
3701 struct drm_i915_private *dev_priv = dev->dev_private;
3704 if (INTEL_INFO(dev)->gen < 6)
3705 return -ENODEV;
3707 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3709 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3713 if (IS_VALLEYVIEW(dev))
3714 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
3715 else
3716 *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
3717 mutex_unlock(&dev_priv->rps.hw_lock);
3723 i915_min_freq_set(void *data, u64 val)
3725 struct drm_device *dev = data;
3726 struct drm_i915_private *dev_priv = dev->dev_private;
3727 u32 rp_state_cap, hw_max, hw_min;
3730 if (INTEL_INFO(dev)->gen < 6)
3731 return -ENODEV;
3733 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3735 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
3737 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3742 * Turbo will still be enabled, but won't go below the set value.
3744 if (IS_VALLEYVIEW(dev)) {
3745 val = vlv_freq_opcode(dev_priv, val);
3747 hw_max = dev_priv->rps.max_freq;
3748 hw_min = dev_priv->rps.min_freq;
3749 } else {
3750 do_div(val, GT_FREQUENCY_MULTIPLIER);
3752 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3753 hw_max = dev_priv->rps.max_freq;
3754 hw_min = (rp_state_cap >> 16) & 0xff;
3757 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
3758 mutex_unlock(&dev_priv->rps.hw_lock);
3759 return -EINVAL;
3762 dev_priv->rps.min_freq_softlimit = val;
3764 if (IS_VALLEYVIEW(dev))
3765 valleyview_set_rps(dev, val);
3766 else
3767 gen6_set_rps(dev, val);
3769 mutex_unlock(&dev_priv->rps.hw_lock);
3774 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
3775 i915_min_freq_get, i915_min_freq_set,
3779 i915_cache_sharing_get(void *data, u64 *val)
3781 struct drm_device *dev = data;
3782 struct drm_i915_private *dev_priv = dev->dev_private;
3786 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
3787 return -ENODEV;
3789 ret = mutex_lock_interruptible(&dev->struct_mutex);
3792 intel_runtime_pm_get(dev_priv);
3794 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3796 intel_runtime_pm_put(dev_priv);
3797 mutex_unlock(&dev_priv->dev->struct_mutex);
3799 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
3805 i915_cache_sharing_set(void *data, u64 val)
3807 struct drm_device *dev = data;
3808 struct drm_i915_private *dev_priv = dev->dev_private;
3811 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
3812 return -ENODEV;
3814 if (val > 3)
3815 return -EINVAL;
3817 intel_runtime_pm_get(dev_priv);
3818 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
3820 /* Update the cache sharing policy here as well */
3821 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3822 snpcr &= ~GEN6_MBC_SNPCR_MASK;
3823 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
3824 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3826 intel_runtime_pm_put(dev_priv);
3830 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
3831 i915_cache_sharing_get, i915_cache_sharing_set,
3832 "%llu\n");
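/*
 * A usage sketch (path assumed): the snoop policy above is a two-bit
 * field, so valid writes are 0..3, e.g.
 *
 * # echo 3 > /sys/kernel/debug/dri/0/i915_cache_sharing
 *
 * Reading the file back reports the field currently programmed into
 * GEN6_MBCUNIT_SNPCR.
 */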
3834 static int i915_forcewake_open(struct inode *inode, struct file *file)
3836 struct drm_device *dev = inode->i_private;
3837 struct drm_i915_private *dev_priv = dev->dev_private;
3839 if (INTEL_INFO(dev)->gen < 6)
3840 return 0;
3842 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3847 static int i915_forcewake_release(struct inode *inode, struct file *file)
3849 struct drm_device *dev = inode->i_private;
3850 struct drm_i915_private *dev_priv = dev->dev_private;
3852 if (INTEL_INFO(dev)->gen < 6)
3853 return 0;
3855 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3860 static const struct file_operations i915_forcewake_fops = {
3861 .owner = THIS_MODULE,
3862 .open = i915_forcewake_open,
3863 .release = i915_forcewake_release,
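/*
 * A usage sketch (path assumed): holding this file open pins the GT
 * awake via FORCEWAKE_ALL, which is useful while reading registers from
 * userspace, e.g.
 *
 * # exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user
 * ... poke registers ...
 * # exec 3<&-
 *
 * open() takes the forcewake reference and release() drops it, as the
 * fops above show.
 */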
3866 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
3868 struct drm_device *dev = minor->dev;
3871 ent = debugfs_create_file("i915_forcewake_user",
3874 &i915_forcewake_fops);
3878 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
3881 static int i915_debugfs_create(struct dentry *root,
3882 struct drm_minor *minor,
3884 const struct file_operations *fops)
3886 struct drm_device *dev = minor->dev;
3889 ent = debugfs_create_file(name,
3896 return drm_add_fake_info_node(minor, ent, fops);
3899 static const struct drm_info_list i915_debugfs_list[] = {
3900 {"i915_capabilities", i915_capabilities, 0},
3901 {"i915_gem_objects", i915_gem_object_info, 0},
3902 {"i915_gem_gtt", i915_gem_gtt_info, 0},
3903 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
3904 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
3905 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
3906 {"i915_gem_stolen", i915_gem_stolen_list_info },
3907 {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
3908 {"i915_gem_request", i915_gem_request_info, 0},
3909 {"i915_gem_seqno", i915_gem_seqno_info, 0},
3910 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
3911 {"i915_gem_interrupt", i915_interrupt_info, 0},
3912 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
3913 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
3914 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
3915 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
3916 {"i915_frequency_info", i915_frequency_info, 0},
3917 {"i915_drpc_info", i915_drpc_info, 0},
3918 {"i915_emon_status", i915_emon_status, 0},
3919 {"i915_ring_freq_table", i915_ring_freq_table, 0},
3920 {"i915_fbc_status", i915_fbc_status, 0},
3921 {"i915_ips_status", i915_ips_status, 0},
3922 {"i915_sr_status", i915_sr_status, 0},
3923 {"i915_opregion", i915_opregion, 0},
3924 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
3925 {"i915_context_status", i915_context_status, 0},
3926 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
3927 {"i915_swizzle_info", i915_swizzle_info, 0},
3928 {"i915_ppgtt_info", i915_ppgtt_info, 0},
3929 {"i915_llc", i915_llc, 0},
3930 {"i915_edp_psr_status", i915_edp_psr_status, 0},
3931 {"i915_sink_crc_eDP1", i915_sink_crc, 0},
3932 {"i915_energy_uJ", i915_energy_uJ, 0},
3933 {"i915_pc8_status", i915_pc8_status, 0},
3934 {"i915_power_domain_info", i915_power_domain_info, 0},
3935 {"i915_display_info", i915_display_info, 0},
3936 {"i915_semaphore_status", i915_semaphore_status, 0},
3937 {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
3938 {"i915_dp_mst_info", i915_dp_mst_info, 0},
3940 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
3942 static const struct i915_debugfs_files {
3943 const char *name;
3944 const struct file_operations *fops;
3945 } i915_debugfs_files[] = {
3946 {"i915_wedged", &i915_wedged_fops},
3947 {"i915_max_freq", &i915_max_freq_fops},
3948 {"i915_min_freq", &i915_min_freq_fops},
3949 {"i915_cache_sharing", &i915_cache_sharing_fops},
3950 {"i915_ring_stop", &i915_ring_stop_fops},
3951 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
3952 {"i915_ring_test_irq", &i915_ring_test_irq_fops},
3953 {"i915_gem_drop_caches", &i915_drop_caches_fops},
3954 {"i915_error_state", &i915_error_state_fops},
3955 {"i915_next_seqno", &i915_next_seqno_fops},
3956 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
3957 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
3958 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
3959 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
3962 void intel_display_crc_init(struct drm_device *dev)
3964 struct drm_i915_private *dev_priv = dev->dev_private;
3967 for_each_pipe(pipe) {
3968 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
3970 pipe_crc->opened = false;
3971 spin_lock_init(&pipe_crc->lock);
3972 init_waitqueue_head(&pipe_crc->wq);
3976 int i915_debugfs_init(struct drm_minor *minor)
3980 ret = i915_forcewake_create(minor->debugfs_root, minor);
3984 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
3985 ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
3990 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
3991 ret = i915_debugfs_create(minor->debugfs_root, minor,
3992 i915_debugfs_files[i].name,
3993 i915_debugfs_files[i].fops);
3998 return drm_debugfs_create_files(i915_debugfs_list,
3999 I915_DEBUGFS_ENTRIES,
4000 minor->debugfs_root, minor);
4003 void i915_debugfs_cleanup(struct drm_minor *minor)
4007 drm_debugfs_remove_files(i915_debugfs_list,
4008 I915_DEBUGFS_ENTRIES, minor);
4010 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
4011 1, minor);
4013 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
4014 struct drm_info_list *info_list =
4015 (struct drm_info_list *)&i915_pipe_crc_data[i];
4017 drm_debugfs_remove_files(info_list, 1, minor);
4020 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4021 struct drm_info_list *info_list =
4022 (struct drm_info_list *) i915_debugfs_files[i].fops;
4024 drm_debugfs_remove_files(info_list, 1, minor);