/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>
#include <linux/bsearch.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
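/*
 * A posting read is a read issued purely for its side effect: it forces a
 * round trip to the device, flushing any posted writes ahead of it; the
 * value read is discarded. A hedged sketch of the pattern (register names
 * illustrative only, not from this file):
 *
 *	__raw_i915_write32(dev_priv, SOME_CTL_REG, val);
 *	__raw_posting_read(dev_priv, SOME_REG_IN_SAME_CACHELINE);
 */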
static const char * const forcewake_domain_names[] = {

intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)

	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

fw_domain_reset(const struct intel_uncore_forcewake_domain *d)

	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);

fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)

	hrtimer_start_range_ns(&d->timer,
			       ktime_set(0, NSEC_PER_MSEC),

fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)

	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));

fw_domain_get(const struct intel_uncore_forcewake_domain *d)

	__raw_i915_write32(d->i915, d->reg_set, d->val_set);

fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));

fw_domain_put(const struct intel_uncore_forcewake_domain *d)

	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);

fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)

	/* something from same cacheline, but not from the set register */
	if (i915_mmio_reg_valid(d->reg_post))
		__raw_posting_read(d->i915, d->reg_post);
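/*
 * Taken together, the helpers above implement the per-domain handshake
 * with the hardware: write val_set into reg_set to request the power well,
 * poll the FORCEWAKE_KERNEL bit in reg_ack until the hardware acknowledges,
 * do the MMIO work, then write val_clear to drop the request. A rough
 * sketch of one cycle, using the helpers as defined above:
 *
 *	fw_domain_wait_ack_clear(d);	 the previous release has landed
 *	fw_domain_get(d);		 request the power well
 *	fw_domain_wait_ack(d);		 wait for the hardware ack
 *	... raw register accesses ...
 *	fw_domain_put(d);		 drop the request
 */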
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)

	struct intel_uncore_forcewake_domain *d;
	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}
	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_wait_ack(d);

fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)

	struct intel_uncore_forcewake_domain *d;
	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
fw_domains_posting_read(struct drm_i915_private *dev_priv)

	struct intel_uncore_forcewake_domain *d;
	/* No need to do this for all domains, the first one found will do */
	for_each_fw_domain(d, dev_priv) {
		fw_domain_posting_read(d);
		break;
	}
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)

	struct intel_uncore_forcewake_domain *d;
	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)

	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)

	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)

	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)

	int ret = 0;
	/* On VLV, the FIFO is shared by both SW and HW, so we need to
	 * read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}

		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;

		dev_priv->uncore.fifo_count = fifo;
	}

	dev_priv->uncore.fifo_count--;

	return ret;
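/*
 * Accounting sketch for the above: each write through the gen6 write path
 * consumes one GT FIFO entry (hence the fifo_count-- just before the
 * return). Once the cached count drops below the reserved threshold, the
 * loop re-reads GTFIFOCTL and busy-waits for the hardware to drain
 * entries; a nonzero return tells the caller to check GTFIFODBG afterwards.
 */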
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)

	struct intel_uncore_forcewake_domain *domain =
		container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct drm_i915_private *dev_priv = domain->i915;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0) {
		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
		dev_priv->uncore.fw_domains_active &= ~domain->mask;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
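/*
 * Design note: forcewake releases are deferred, not immediate. Instead of
 * dropping the wake in __intel_uncore_forcewake_put(), the put path arms
 * this per-domain hrtimer (~1ms, see fw_domain_arm_timer() above), so
 * closely spaced register accesses reuse the still-held wake rather than
 * bouncing the power well through set/ack/clear cycles.
 */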
void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
				  bool restore)

	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;
	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}
	WARN_ON(active_domains);

	fw = dev_priv->uncore.fw_domains_active;
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
static u64 gen9_edram_size(struct drm_i915_private *dev_priv)

	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;
	return EDRAM_NUM_BANKS(cap) *
	       ways[EDRAM_WAYS_IDX(cap)] *
	       sets[EDRAM_SETS_IDX(cap)] *
	       1024 * 1024;
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)

	if (!HAS_EDRAM(dev_priv))
		return 0;
	/* The capability bits needed for the size calculation are not
	 * present before gen9, so always return 128MB there.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
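/*
 * Worked example for gen9_edram_size(), with a hypothetical capability
 * word: EDRAM_NUM_BANKS() == 8, EDRAM_WAYS_IDX() == 1 (8 ways from the
 * table) and EDRAM_SETS_IDX() == 2 (2 sets) gives 8 * 8 * 2 * 1MiB =
 * 128MiB of eDRAM.
 */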
static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)

	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);
		/* NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;

vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;

check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)

	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		return fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_check_for_unclaimed_mmio(dev_priv);

	return false;
static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)

	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);

void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
				 bool restore_forcewake)

	__intel_uncore_early_sanitize(dev_priv, restore_forcewake);
	i915_check_and_clear_faults(dev_priv);
void intel_uncore_sanitize(struct drm_i915_private *dev_priv)

	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)

	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (domain->wake_count++)
			fw_domains &= ~domain->mask;
	}

	if (fw_domains) {
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
		dev_priv->uncore.fw_domains_active |= fw_domains;
	}
/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down a particular
 * forcewake domain this function should be called at the beginning of the
 * sequence. The reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)

	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
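/*
 * Usage sketch (hypothetical caller, register sequence illustrative):
 * hold the domains across raw accesses that must not race GT power-down:
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	... a sequence of I915_READ_FW()/I915_WRITE_FW() accesses ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 */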
/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)

	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)

	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		fw_domain_arm_timer(domain);
	}
/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * This function drops the device-level forcewake references for the
 * specified domains, obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)

	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)

	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.fw_domains_active);

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
static int fw_range_cmp(const void *key, const void *elt)

	const struct intel_forcewake_range *entry = elt;
	u32 offset = (u32)((unsigned long)key);

	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;

static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)

	const struct intel_forcewake_range *table, *entry;
	unsigned int num_entries;

	table = dev_priv->uncore.fw_domains_table;
	num_entries = dev_priv->uncore.fw_domains_table_entries;
	entry = bsearch((void *)(unsigned long)offset, (const void *)table,
			num_entries, sizeof(struct intel_forcewake_range),
			fw_range_cmp);

	return entry ? entry->domains : 0;
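/*
 * Example: with the VLV table below, find_fw_domain(dev_priv, 0x12000)
 * binary-searches __vlv_fw_ranges, lands in the 0x12000-0x13fff entry and
 * returns FORCEWAKE_MEDIA; an offset covered by no range (say 0x4000 on
 * VLV) returns 0, meaning no forcewake is needed for that register.
 */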
intel_fw_table_check(struct drm_i915_private *dev_priv)

	const struct intel_forcewake_range *ranges;
	unsigned int num_ranges;
	s32 prev;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		return;

	ranges = dev_priv->uncore.fw_domains_table;
	if (!ranges)
		return;

	num_ranges = dev_priv->uncore.fw_domains_table_entries;

	for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
		WARN_ON_ONCE(prev >= (s32)ranges->start);
		prev = ranges->start;
		WARN_ON_ONCE(prev >= (s32)ranges->end);
		prev = ranges->end;
	}
#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(IS_GEN9(dev_priv) || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
#define __fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};
static void intel_shadow_table_check(void)

	const i915_reg_t *reg = gen8_shadowed_regs;
	s32 prev, offset;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		return;

	for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
		offset = i915_mmio_reg_offset(*reg);
		WARN_ON_ONCE(prev >= (s32)offset);
		prev = offset;
	}
static int mmio_reg_cmp(const void *key, const void *elt)

	u32 offset = (u32)(unsigned long)key;
	i915_reg_t *reg = (i915_reg_t *)elt;

	if (offset < i915_mmio_reg_offset(*reg))
		return -1;
	else if (offset > i915_mmio_reg_offset(*reg))
		return 1;
	else
		return 0;
static bool is_gen8_shadowed(u32 offset)

	i915_reg_t *reg;

	reg = bsearch((void *)(unsigned long)offset,
		      (const void *)gen8_shadowed_regs,
		      ARRAY_SIZE(gen8_shadowed_regs),
		      sizeof(i915_reg_t), mmio_reg_cmp);

	return reg;
#define __gen8_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};
#define __fwtable_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xbfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
ilk_dummy_write(struct drm_i915_private *dev_priv)

	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)

	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		i915.mmio_debug--; /* Only report the first N failures */

unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)

	if (likely(!i915.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER
#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)

	struct intel_uncore_forcewake_domain *domain;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv)
		fw_domain_arm_timer(domain);

	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
	dev_priv->uncore.fw_domains_active |= fw_domains;

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)

	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= dev_priv->uncore.fw_domains;
	fw_domains &= ~dev_priv->uncore.fw_domains_active;

	if (fw_domains)
		___force_wake_auto(dev_priv, fw_domains);
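/*
 * __force_wake_auto() is the fast path used by the mmio accessors below:
 * domains already active are filtered out with two mask operations, so the
 * common case (forcewake already held) needs no timer or vfunc work at all.
 * Only genuinely inactive domains fall through to the noinline helper,
 * which re-arms each domain's release timer and grabs the wake.
 */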
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __gen6_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __fwtable_read(x) \
static u##x \
fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
#define VGPU_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	VGPU_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	VGPU_READ_FOOTER; \
}

#undef __vgpu_read
#undef VGPU_READ_FOOTER
#undef VGPU_READ_HEADER
#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER
#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __gen8_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __fwtable_write(x) \
static void \
fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
#define VGPU_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  i915_reg_t reg, u##x val, bool trace) { \
	VGPU_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	VGPU_WRITE_FOOTER; \
}

#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)

	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->reg_set = reg_set;
	d->reg_ack = reg_ack;
	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;
	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

	d->mask = 1 << domain_id;

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)

	if (INTEL_INFO(dev_priv)->gen <= 5)
		return;

	if (IS_GEN9(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev_priv)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to also reset the gen6 fw registers
		 * before the ecobus check.
		 */
		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&dev_priv->uncore.lock);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		spin_unlock_irq(&dev_priv->uncore.lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
#define ASSIGN_FW_DOMAINS_TABLE(d) \
{ \
	dev_priv->uncore.fw_domains_table = \
		(struct intel_forcewake_range *)(d); \
	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}
void intel_uncore_init(struct drm_i915_private *dev_priv)

	i915_check_vgpu(dev_priv);

	intel_uncore_edram_detect(dev_priv);
	intel_uncore_fw_domains_init(dev_priv);
	__intel_uncore_early_sanitize(dev_priv, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;
	switch (INTEL_INFO(dev_priv)->gen) {
	default:
	case 9:
		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
		ASSIGN_READ_MMIO_VFUNCS(fwtable);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
			ASSIGN_READ_MMIO_VFUNCS(fwtable);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		ASSIGN_WRITE_MMIO_VFUNCS(gen6);

		if (IS_VALLEYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}
	intel_fw_table_check(dev_priv);
	if (INTEL_GEN(dev_priv) >= 8)
		intel_shadow_table_check();

	if (intel_vgpu_active(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev_priv);

#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS
void intel_uncore_fini(struct drm_i915_private *dev_priv)

	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev_priv);
	intel_uncore_forcewake_reset(dev_priv, false);

#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)

static const struct register_whitelist {
	i915_reg_t offset_ldw, offset_udw;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)

	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	i915_reg_t offset_ldw, offset_udw;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
		    (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset_ldw = entry->offset_ldw;
	offset_udw = entry->offset_udw;
	size = entry->size;
	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1: reg->val = I915_READ64_2x32(offset_ldw, offset_udw); break;
	case 8: reg->val = I915_READ64(offset_ldw); break;
	case 4: reg->val = I915_READ(offset_ldw); break;
	case 2: reg->val = I915_READ16(offset_ldw); break;
	case 1: reg->val = I915_READ8(offset_ldw); break;
	}

	intel_runtime_pm_put(dev_priv);
static int i915_reset_complete(struct pci_dev *pdev)

	u8 gdrst;
	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;

static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)

	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* assert reset for at least 20 usec */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(pdev), 500);

static int g4x_reset_complete(struct pci_dev *pdev)

	u8 gdrst;
	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;

static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)

	struct pci_dev *pdev = dev_priv->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(pdev), 500);
static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)

	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST, 0);

	return 0;
static int ironlake_do_reset(struct drm_i915_private *dev_priv,
			     unsigned engine_mask)

	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
				u32 hw_domain_mask)

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);

	/* Spin waiting for the device to ack the reset requests */
	return intel_wait_for_register_fw(dev_priv,
					  GEN6_GDRST, hw_domain_mask, 0,
					  500);
/**
 * gen6_reset_engines - reset individual engines
 * @dev_priv: i915 device
 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
 *
 * This function will reset the individual engines that are set in engine_mask.
 * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
 *
 * Note: It is the responsibility of the caller to handle the difference
 * between asking for a full domain reset versus a reset of all available
 * individual engines.
 *
 * Returns 0 on success, nonzero on error.
 */
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)

	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN6_GRDOM_RENDER,
		[BCS] = GEN6_GRDOM_BLT,
		[VCS] = GEN6_GRDOM_MEDIA,
		[VCS2] = GEN8_GRDOM_MEDIA2,
		[VECS] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		unsigned int tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
			hw_mask |= hw_engine_mask[engine->id];
	}

	ret = gen6_hw_domain_reset(dev_priv, hw_mask);

	intel_uncore_forcewake_reset(dev_priv, true);

	return ret;
/**
 * intel_wait_for_register_fw - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       const u32 mask,
			       const u32 value,
			       const unsigned long timeout_ms)
#define done ((I915_READ_FW(reg) & mask) == value)
	int ret = wait_for_us(done, 2);
	if (ret)
		ret = wait_for(done, timeout_ms);
#undef done
	return ret;
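/*
 * Usage sketch (register and mask illustrative, borrowed from the engine
 * idle check elsewhere in the driver): with forcewake already held,
 *
 *	err = intel_wait_for_register_fw(dev_priv,
 *					 RING_MI_MODE(engine->mmio_base),
 *					 MODE_IDLE, MODE_IDLE, 1000);
 *
 * polls fast for 2us, then falls back to the slower wait_for() loop.
 */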
/**
 * intel_wait_for_register - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register(struct drm_i915_private *dev_priv,
			    i915_reg_t reg,
			    const u32 mask,
			    const u32 value,
			    const unsigned long timeout_ms)

	unsigned fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);

	intel_uncore_forcewake_get(dev_priv, fw);
	ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
	intel_uncore_forcewake_put(dev_priv, fw);
	if (ret)
		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
			       timeout_ms);
static int gen8_request_engine_reset(struct intel_engine_cs *engine)

	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

	ret = intel_wait_for_register_fw(dev_priv,
					 RING_RESET_CTL(engine->mmio_base),
					 RESET_CTL_READY_TO_RESET,
					 RESET_CTL_READY_TO_RESET,
					 700);
	if (ret)
		DRM_ERROR("%s: reset request timeout\n", engine->name);

	return ret;
static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)

	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));

static int gen8_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)

	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		if (gen8_request_engine_reset(engine))
			goto not_ready;

	return gen6_reset_engines(dev_priv, engine_mask);

not_ready:
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		gen8_unrequest_engine_reset(engine);

	return -EIO;
1680 static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
1685 if (INTEL_INFO(dev_priv)->gen >= 8)
1686 return gen8_reset_engines;
1687 else if (INTEL_INFO(dev_priv)->gen >= 6)
1688 return gen6_reset_engines;
1689 else if (IS_GEN5(dev_priv))
1690 return ironlake_do_reset;
1691 else if (IS_G4X(dev_priv))
1692 return g4x_do_reset;
1693 else if (IS_G33(dev_priv))
1694 return g33_do_reset;
1695 else if (INTEL_INFO(dev_priv)->gen >= 3)
1696 return i915_do_reset;
int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)

	reset_func reset;
	int ret;

	reset = intel_get_gpu_reset(dev_priv);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev_priv, engine_mask);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)

	return intel_get_gpu_reset(dev_priv) != NULL;

int intel_guc_reset(struct drm_i915_private *dev_priv)

	int ret;
	unsigned long irqflags;

	if (!HAS_GUC(dev_priv))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)

	return check_for_unclaimed_mmio(dev_priv);
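/*
 * The helper below arms one-shot reporting: the first unclaimed access it
 * sees (it is polled periodically, from the hangcheck path at the time of
 * writing) enables mmio debugging so the very next access passing through
 * unclaimed_reg_debug() can name the offending register, after which the
 * check disarms itself via unclaimed_mmio_check.
 */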
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)

	if (unlikely(i915.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915.mmio_debug = 1;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
				i915_reg_t reg)

	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv)) {
		fw_domains = __fwtable_reg_read_fw_domains(offset);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(offset);
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
				 i915_reg_t reg)

	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
		fw_domains = __fwtable_reg_write_fw_domains(offset);
	} else if (IS_GEN8(dev_priv)) {
		fw_domains = __gen8_reg_write_fw_domains(offset);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 *				    a register
 * @dev_priv: pointer to struct drm_i915_private
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains that need to be taken (with, for
 * example, intel_uncore_forcewake_get()) for the specified register to be
 * accessible in the specified mode (read, write or read/write) with raw
 * mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER)
 * requires the caller to do FIFO management on their own or risk losing
 * writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op)

	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (intel_vgpu_active(dev_priv))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);

	return fw_domains;