/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
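
/*
 * Error-state output is produced through a small windowed formatter: the
 * debugfs/sysfs reader asks for a chunk at file offset "pos", which becomes
 * e->start. Everything formatted before that offset is sought past by the
 * helpers below, and only output falling inside the window is kept in e->buf.
 */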

static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}

static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		WARN(1, "overflow");
		e->err = -EIO;
		return false;
	}

	return true;
}

static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that
	 * the start position matches the start of the buffer
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}

static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);
		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}

static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek the first puts which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
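
/*
 * Each captured buffer object is printed on one line: offset, size, domains
 * and seqnos, followed by short flag suffixes (pinned, tiling, dirty,
 * purgeable), the ring name and the cache level.
 */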

static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	err_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, "  %08x %8u %02x %02x %x %x",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->rseqno, err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}

static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}
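
/*
 * Dump of the register snapshot i915_record_ring_state() took for one ring;
 * gen-dependent registers are only printed where they were captured.
 */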

static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  unsigned ring)
{
	BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
	if (!error->ring[ring].valid)
		return;

	err_printf(m, "%s command stream:\n", ring_str(ring));
	err_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
	err_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
	err_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
	err_printf(m, "  HWS: 0x%08x\n", error->hws[ring]);
	err_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
	err_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
	err_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
	err_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
	if (INTEL_INFO(dev)->gen >= 4) {
		err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr[ring]);
		err_printf(m, "  BB_STATE: 0x%08x\n", error->bbstate[ring]);
		err_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
	err_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
		err_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   error->semaphore_mboxes[ring][0],
			   error->semaphore_seqno[ring][0]);
		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   error->semaphore_mboxes[ring][1],
			   error->semaphore_seqno[ring][1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   error->semaphore_mboxes[ring][2],
				   error->semaphore_seqno[ring][2]);
		}
	}
	err_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
	err_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
	err_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
	err_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
	err_printf(m, "  hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(error->hangcheck_action[ring]),
		   error->hangcheck_score[ring]);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	int i, j, page, offset, elt;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);
		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
		i915_ring_error_state(m, dev, error, i);

	if (error->active_bo)
		print_error_buffers(m, "Active",
				    error->active_bo[0],
				    error->active_bo_count[0]);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
				    error->pinned_bo[0],
				    error->pinned_bo_count[0]);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		struct drm_i915_error_object *obj;

		if ((obj = error->ring[i].batchbuffer)) {
			err_printf(m, "%s --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					err_printf(m, "%08x :  %08x\n", offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					err_printf(m, "%08x :  %08x\n",
						   offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

		if ((obj = error->ring[i].hws)) {
			err_printf(m, "%s --- HW Status = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   obj->pages[0][elt],
					   obj->pages[0][elt+1],
					   obj->pages[0][elt+2],
					   obj->pages[0][elt+3]);
				offset += 16;
			}
		}

		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   obj->pages[0][elt],
					   obj->pages[0][elt+1],
					   obj->pages[0][elt+2],
					   obj->pages[0][elt+3]);
				offset += 16;
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}
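
/*
 * The buffer must be at least PAGE_SIZE even for short reads because the
 * very first printf of a window has to fit in its entirety before
 * __i915_error_advance() can memmove it to the requested start offset.
 */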

int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to the start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}

static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].hws);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
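
/*
 * Copy out the pages of an object at hang time. Capture can run in atomic
 * (interrupt) context, hence the GFP_ATOMIC allocations and the three copy
 * strategies below: an uncached read through the mappable GTT aperture, a
 * read from stolen memory, or a clflushed copy via a CPU kmap.
 */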

static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       struct i915_address_space *vm,
			       const int num_pages)
{
	struct drm_i915_error_object *dst;
	int i;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm);
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping &&
		    i915_is_ggtt(vm)) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *)offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = num_pages;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}

#define i915_error_object_create(dev_priv, src, vm) \
	i915_error_object_create_sized((dev_priv), (src), (vm), \
				       (src)->base.size >> PAGE_SHIFT)

#define i915_error_ggtt_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \
				       (src)->base.size >> PAGE_SHIFT)
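
/*
 * Record only the metadata of a buffer object (size, domains, seqnos, flag
 * bits); this feeds the Active/Pinned lists that print_error_buffers()
 * formats. No page contents are copied here.
 */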

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (i915_gem_obj_is_pinned(obj))
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, mm_list) {
		capture_bo(err++, vma->obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, global_list) {
		if (!i915_gem_obj_is_pinned(obj))
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}
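
/*
 * Fence registers are 64 bits wide from gen4 onwards; on gen2/3 they are
 * single 32-bit registers, eight of them, with another eight on i945/G33
 * class hardware.
 */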

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 8:
	case 7:
	case 6:
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}

/* This assumes all batchbuffers are executed from the PPGTT. It might have to
 * change in the future. */
static bool is_active_vm(struct i915_address_space *vm,
			 struct intel_ring_buffer *ring)
{
	struct drm_device *dev = vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;

	if (INTEL_INFO(dev)->gen < 7)
		return i915_is_ggtt(vm);

	/* FIXME: This ignores that the global gtt vm is also on this list. */
	ppgtt = container_of(vm, struct i915_hw_ppgtt, base);

	if (INTEL_INFO(dev)->gen >= 8) {
		u64 pdp0 = (u64)I915_READ(GEN8_RING_PDP_UDW(ring, 0)) << 32;
		pdp0 |= I915_READ(GEN8_RING_PDP_LDW(ring, 0));
		return pdp0 == ppgtt->pd_dma_addr[0];
	} else {
		u32 pp_db = I915_READ(RING_PP_DIR_BASE(ring));
		return (pp_db >> 10) == ppgtt->pd_offset;
	}
}
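
/*
 * Walk the active lists of the address space the ring is currently
 * executing from and return a copy of the first still-unretired command
 * object: the batch that was presumably running when the hang hit.
 */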

static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct i915_address_space *vm;
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;
	bool found_active = false;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->scratch.obj;
		if (obj != NULL &&
		    acthd >= i915_gem_obj_ggtt_offset(obj) &&
		    acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
			return i915_error_ggtt_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		if (!is_active_vm(vm, ring))
			continue;

		found_active = true;

		list_for_each_entry(vma, &vm->active_list, mm_list) {
			obj = vma->obj;
			if (obj->ring != ring)
				continue;

			if (i915_seqno_passed(seqno, obj->last_read_seqno))
				continue;

			if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
				continue;

			/* We need to copy these to an anonymous buffer as the simplest
			 * method to avoid being overwritten by userspace.
			 */
			return i915_error_object_create(dev_priv, obj, vm);
		}
	}

	WARN_ON(!found_active);
	return NULL;
}
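
/*
 * Snapshot the per-ring MMIO state (head/tail, fault and semaphore
 * registers, and so on) into the error record while the hang is fresh.
 */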

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (HAS_VEBOX(dev)) {
		error->semaphore_mboxes[ring->id][2] =
			I915_READ(RING_SYNC_2(ring->mmio_base));
		error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base));
		if (INTEL_INFO(dev)->gen >= 8)
			error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
		error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	if (I915_NEED_GFX_HWS(dev)) {
		int mmio;

		if (IS_GEN7(dev)) {
			switch (ring->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(ring->dev)) {
			mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(ring->mmio_base);
		}

		error->hws[ring->id] = I915_READ(mmio);
	}

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;

	error->hangcheck_score[ring->id] = ring->hangcheck.score;
	error->hangcheck_action[ring->id] = ring->hangcheck.action;
}

static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_object_create_sized(dev_priv,
								    obj,
								    &dev_priv->gtt.base,
								    1);
			break;
		}
	}
}
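
/*
 * For every ring, collect the register snapshot, a copy of the suspected
 * batch, the ringbuffer and HW status page contents, the active context,
 * and the list of still-outstanding requests.
 */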

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_ring_buffer *ring = &dev_priv->ring[i];

		if (ring->dev == NULL)
			continue;

		error->ring[i].valid = true;

		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_ggtt_object_create(dev_priv, ring->obj);

		if (ring->status_page.obj)
			error->ring[i].hws =
				i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kcalloc(count, sizeof(*error->ring[i].requests),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, mm_list)
		i++;
	error->active_bo_count[ndx] = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			i++;
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}

static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct i915_address_space *vm;
	int cnt = 0, i = 0;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		cnt++;

	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		i915_gem_capture_vm(dev_priv, error, vm, i++);
}
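
/*
 * Example (from userspace, assuming card0): the dump captured below can be
 * read back as text with
 *
 *	cat /sys/class/drm/card0/error
 *
 * and, on kernels that expose a writable error node, cleared again by
 * writing to that same file.
 */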

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int pipe;

	/* If an error record already exists, keep the first one */
	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
		 dev->primary->index);
	DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
	DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
	DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
	DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	if (!HAS_PCH_SPLIT(dev))
		for_each_pipe(pipe)
			error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}

void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
}

void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}

const char *i915_cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped or LLC";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
	case 8:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}