/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>
#define FORCEWAKE_ACK_TIMEOUT_MS 50
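/*
 * Note: "forcewake" is the handshake by which the driver keeps a GT power
 * well awake while it touches registers inside it. Each power well is
 * modelled as a forcewake domain (render, blitter, media); a domain must be
 * held awake across any MMIO access to its registers, otherwise reads can
 * return garbage and writes can be dropped.
 */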
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}
static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}
static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (i915_mmio_reg_valid(d->reg_post))
		__raw_posting_read(d->i915, d->reg_post);
}
static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_wait_ack(d);

	dev_priv->uncore.fw_domains_active |= fw_domains;
}
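/*
 * Note the two-phase handshake above: for each requested domain we first
 * wait for any previous release to be acknowledged (ack bit clear) and post
 * the set bit, and only once all requests are posted do we poll for the
 * acks. Issuing the set writes up front lets the wells power up in parallel
 * instead of serialising the full wake latency per domain.
 */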
static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}

	dev_priv->uncore.fw_domains_active &= ~fw_domains;
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv) {
		fw_domain_posting_read(d);
		break;
	}
}
static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, the FIFO is shared by both SW and HW,
	 * so we need to read FREE_ENTRIES every time.
	 */
	if (IS_VALLEYVIEW(dev_priv))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}
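/*
 * Gen6/gen7 parts funnel register writes through a hardware FIFO with a
 * limited number of entries; the loop above polls GTFIFOCTL until enough
 * free slots exist before a write is allowed to proceed, always keeping
 * GT_FIFO_NUM_RESERVED_ENTRIES slots in reserve for the hardware itself.
 */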
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct drm_i915_private *dev_priv = domain->i915;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
}
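/*
 * Automatic ("auto") forcewake references are dropped from this hrtimer
 * rather than synchronously: each register access that implicitly grabbed
 * a domain arms a ~1ms timer, so back-to-back accesses keep the well awake
 * without paying the wake handshake on every single access.
 */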
void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
				  bool restore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = dev_priv->uncore.fw_domains_active;
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)] *
		1024 * 1024;
}

u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The needed capability bits for size calculation
	 * are not there with pre gen9 so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}
static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we don't have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}
static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		return fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_check_for_unclaimed_mmio(dev_priv);

	return false;
}
static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);

	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST))
		info->has_decoupled_mmio = false;

	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}
void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
				 bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev_priv, restore_forcewake);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (domain->wake_count++)
			fw_domains &= ~domain->mask;
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence, and the reference subsequently dropped by a symmetric call to
 * intel_uncore_forcewake_put(). Usually the caller wants all the domains
 * to be kept awake, in which case @fw_domains would be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
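/*
 * Illustrative usage (a sketch, not code from this file): a caller that
 * needs the render well held across a raw access sequence would do
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	... sequence of I915_READ_FW()/I915_WRITE_FW() accesses ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 *
 * with FORCEWAKE_ALL used instead when the register mix is unknown.
 */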
/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}
static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		fw_domain_arm_timer(domain);
	}
}
/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for the specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.fw_domains_active);
}
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}
/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int start__ = 0, end__ = (num); \
	typeof(base) result__ = NULL; \
	while (start__ < end__) { \
		unsigned int mid__ = start__ + (end__ - start__) / 2; \
		int ret__ = (cmp)((key), (base) + mid__); \
		if (ret__ < 0) { \
			end__ = mid__; \
		} else if (ret__ > 0) { \
			start__ = mid__ + 1; \
		} else { \
			result__ = (base) + mid__; \
			break; \
		} \
	} \
	result__; \
})
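/*
 * BSEARCH evaluates to a pointer to the matching element, or NULL when
 * @key is not found; @base must be sorted ascending with respect to @cmp.
 * For example, find_fw_domain() below invokes it as
 * BSEARCH(offset, table, num_entries, fw_range_cmp).
 */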
static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			dev_priv->uncore.fw_domains_table,
			dev_priv->uncore.fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	WARN(entry->domains & ~dev_priv->uncore.fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~dev_priv->uncore.fw_domains, offset);

	return entry->domains;
}
static void
intel_fw_table_check(struct drm_i915_private *dev_priv)
{
	const struct intel_forcewake_range *ranges;
	unsigned int num_ranges;
	s32 prev;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		return;

	ranges = dev_priv->uncore.fw_domains_table;
	if (!ranges)
		return;

	num_ranges = dev_priv->uncore.fw_domains_table_entries;

	for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
		/* Check that the table is watertight */
		WARN_ON_ONCE(IS_GEN9(dev_priv) &&
			     (prev + 1) != (s32)ranges->start);

		/* Check that the table never goes backwards */
		WARN_ON_ONCE(prev >= (s32)ranges->start);
		prev = ranges->start;

		/* Check that the entry is valid */
		WARN_ON_ONCE(prev >= (s32)ranges->end);
		prev = ranges->end;
	}
}
#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(IS_GEN9(dev_priv) || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
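/*
 * Offsets that fall outside every range in a table need no forcewake:
 * find_fw_domain() returns 0 when no entry matches, so
 * __fwtable_reg_read_fw_domains() below leaves __fwd at 0 and such
 * registers are accessed without waking any power well.
 */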
#define __fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};
static void intel_shadow_table_check(void)
{
	const i915_reg_t *reg = gen8_shadowed_regs;
	s32 prev;
	u32 offset;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		return;

	for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
		offset = i915_mmio_reg_offset(*reg);
		WARN_ON_ONCE(prev >= (s32)offset);
		prev = offset;
	}
}
static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
	const i915_reg_t *regs = gen8_shadowed_regs;

	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
		       mmio_reg_cmp);
}
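/*
 * Shadowed registers need no forcewake for writes: hardware latches a write
 * to a shadowed offset (ring tails, RPS requests) and replays it when the
 * well next powers up, which is why the gen8 write path below skips the
 * wake for is_gen8_shadowed() offsets.
 */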
#define __gen8_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};
#define __fwtable_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}
static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		i915.mmio_debug--; /* Only report the first N failures */
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}
static const enum decoupled_power_domain fw2dpd_domain[] = {
	GEN9_DECOUPLED_PD_RENDER,
	GEN9_DECOUPLED_PD_BLITTER,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_MEDIA,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_ALL
};
/*
 * Decoupled MMIO access for only 1 DWORD
 */
static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
					 u32 reg,
					 enum forcewake_domains fw_domain,
					 enum decoupled_ops operation)
{
	enum decoupled_power_domain dp_domain;
	u32 ctrl_reg_data = 0;

	dp_domain = fw2dpd_domain[fw_domain - 1];

	ctrl_reg_data |= reg;
	ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
	ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
	ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
	__raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);

	if (wait_for_atomic((__raw_i915_read32(dev_priv,
			    GEN9_DECOUPLED_REG0_DW1) &
			    GEN9_DECOUPLED_DW1_GO) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Decoupled MMIO wait timed out\n");
}
static inline u32
__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
			     u32 reg,
			     enum forcewake_domains fw_domain)
{
	__gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
				     GEN9_DECOUPLED_OP_READ);

	return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
}

static inline void
__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
			    u32 reg, u32 data,
			    enum forcewake_domains fw_domain)
{
	__raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);

	__gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
				     GEN9_DECOUPLED_OP_WRITE);
}
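/*
 * The decoupled path drives a mailbox instead of holding forcewake: the
 * payload goes through GEN9_DECOUPLED_REG0_DW0, while the target offset,
 * power domain and operation are packed into GEN9_DECOUPLED_REG0_DW1
 * together with a GO bit that hardware clears once it has performed the
 * access on the driver's behalf.
 */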
#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER
#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv)
		fw_domain_arm_timer(domain);

	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= dev_priv->uncore.fw_domains;
	fw_domains &= ~dev_priv->uncore.fw_domains_active;

	if (fw_domains)
		___force_wake_auto(dev_priv, fw_domains);
}
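/*
 * This is the fast path taken by every gen6+ register accessor: domains
 * already held awake (fw_domains_active) are masked out, so the common
 * case of repeated access to a warm domain costs only a couple of mask
 * operations, and the slow ___force_wake_auto() call is reached only for
 * genuinely sleeping domains.
 */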
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __gen6_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __fwtable_read(x) \
static u##x \
fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}
#define __gen9_decoupled_read(x) \
static u##x \
gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
		       i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(offset); \
	if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
		unsigned i; \
		u32 *ptr_data = (u32 *) &val; \
		for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
			*ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
								 offset, \
								 fw_engine); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	GEN6_READ_FOOTER; \
}

__gen9_decoupled_read(32)
__gen9_decoupled_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
#define VGPU_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	VGPU_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	VGPU_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#undef __vgpu_read
#undef VGPU_READ_FOOTER
#undef VGPU_READ_HEADER
#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER
#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __gen8_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __fwtable_write(x) \
static void \
fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}
#define __gen9_decoupled_write(x) \
static void \
gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
			i915_reg_t reg, u##x val, \
			bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(offset); \
	if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
		__gen9_decoupled_mmio_write(dev_priv, \
					    offset, \
					    val, \
					    fw_engine); \
	else \
		__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_decoupled_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
#define VGPU_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  i915_reg_t reg, u##x val, bool trace) { \
	VGPU_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	VGPU_WRITE_FOOTER; \
}

__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)

#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;

	d->i915 = dev_priv;
	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

	d->mask = 1 << domain_id;

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}
static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen <= 5)
		return;

	if (IS_GEN9(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev_priv)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case of MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&dev_priv->uncore.lock);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		spin_unlock_irq(&dev_priv->uncore.lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}
#define ASSIGN_FW_DOMAINS_TABLE(d) \
{ \
	dev_priv->uncore.fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}
void intel_uncore_init(struct drm_i915_private *dev_priv)
{
	i915_check_vgpu(dev_priv);

	intel_uncore_edram_detect(dev_priv);
	intel_uncore_fw_domains_init(dev_priv);
	__intel_uncore_early_sanitize(dev_priv, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;

	switch (INTEL_INFO(dev_priv)->gen) {
	default:
	case 9:
		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
		ASSIGN_READ_MMIO_VFUNCS(fwtable);
		if (HAS_DECOUPLED_MMIO(dev_priv)) {
			dev_priv->uncore.funcs.mmio_readl =
						gen9_decoupled_read32;
			dev_priv->uncore.funcs.mmio_readq =
						gen9_decoupled_read64;
			dev_priv->uncore.funcs.mmio_writel =
						gen9_decoupled_write32;
		}
		break;
	case 8:
		if (IS_CHERRYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
			ASSIGN_READ_MMIO_VFUNCS(fwtable);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		ASSIGN_WRITE_MMIO_VFUNCS(gen6);

		if (IS_VALLEYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	intel_fw_table_check(dev_priv);
	if (INTEL_GEN(dev_priv) >= 8)
		intel_shadow_table_check();

	if (intel_vgpu_active(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev_priv);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS
void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev_priv);
	intel_uncore_forcewake_reset(dev_priv, false);
}
#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)

static const struct register_whitelist {
	i915_reg_t offset_ldw, offset_udw;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	i915_reg_t offset_ldw, offset_udw;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
		    (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset_ldw = entry->offset_ldw;
	offset_udw = entry->offset_udw;
	size = entry->size;
	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
		break;
	case 8:
		reg->val = I915_READ64(offset_ldw);
		break;
	case 4:
		reg->val = I915_READ(offset_ldw);
		break;
	case 2:
		reg->val = I915_READ16(offset_ldw);
		break;
	case 1:
		reg->val = I915_READ8(offset_ldw);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
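/*
 * The whitelist lookup relies on natural alignment: a whitelisted 64-bit
 * register leaves its low three offset bits free for flags. As a sketch,
 * a userspace request for the split upper/lower read would pass
 * reg->offset with bit 0 set, giving size 8 | 1 above and selecting the
 * I915_READ64_2x32() path.
 */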
static int i915_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;
	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* assert reset for at least 20 usec */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(pdev), 500);
}
static int g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;
	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(pdev), 500);
}
static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST, 0);

	return 0;
}
static int ironlake_do_reset(struct drm_i915_private *dev_priv,
			     unsigned engine_mask)
{
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
				u32 hw_domain_mask)
{
	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);

	/* Spin waiting for the device to ack the reset requests */
	return intel_wait_for_register_fw(dev_priv,
					  GEN6_GDRST, hw_domain_mask, 0,
					  500);
}
/**
 * gen6_reset_engines - reset individual engines
 * @dev_priv: i915 device
 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
 *
 * This function will reset the individual engines that are set in engine_mask.
 * If you provide ALL_ENGINES as mask, a full global domain reset will be issued.
 *
 * Note: It is the responsibility of the caller to handle the difference between
 * asking for a full domain reset versus a reset of all available individual
 * engines.
 *
 * Returns 0 on success, nonzero on error.
 */
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN6_GRDOM_RENDER,
		[BCS] = GEN6_GRDOM_BLT,
		[VCS] = GEN6_GRDOM_MEDIA,
		[VCS2] = GEN8_GRDOM_MEDIA2,
		[VECS] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		unsigned int tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
			hw_mask |= hw_engine_mask[engine->id];
	}

	ret = gen6_hw_domain_reset(dev_priv, hw_mask);

	intel_uncore_forcewake_reset(dev_priv, true);

	return ret;
}
/**
 * intel_wait_for_register_fw - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, so it
 * is not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       const u32 mask,
			       const u32 value,
			       const unsigned long timeout_ms)
{
#define done ((I915_READ_FW(reg) & mask) == value)
	int ret = wait_for_us(done, 2);
	if (ret)
		ret = wait_for(done, timeout_ms);
	return ret;
#undef done
}
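/*
 * Typical usage under forcewake (a sketch patterned on
 * gen8_request_engine_reset() below, where base stands in for
 * engine->mmio_base): poll a ready bit for up to 700ms,
 *
 *	err = intel_wait_for_register_fw(dev_priv, RING_RESET_CTL(base),
 *					 RESET_CTL_READY_TO_RESET,
 *					 RESET_CTL_READY_TO_RESET, 700);
 *
 * with the caller already holding the relevant forcewake domains.
 */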
/**
 * intel_wait_for_register - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register(struct drm_i915_private *dev_priv,
			    i915_reg_t reg,
			    const u32 mask,
			    const u32 value,
			    const unsigned long timeout_ms)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	int ret;

	intel_uncore_forcewake_get(dev_priv, fw);
	ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
	intel_uncore_forcewake_put(dev_priv, fw);
	if (ret)
		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
			       timeout_ms);

	return ret;
}
static int gen8_request_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));

	ret = intel_wait_for_register_fw(dev_priv,
					 RING_RESET_CTL(engine->mmio_base),
					 RESET_CTL_READY_TO_RESET,
					 RESET_CTL_READY_TO_RESET,
					 700);
	if (ret)
		DRM_ERROR("%s: reset request timeout\n", engine->name);

	return ret;
}

static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}
static int gen8_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		if (gen8_request_engine_reset(engine))
			goto not_ready;

	return gen6_reset_engines(dev_priv, engine_mask);

not_ready:
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		gen8_unrequest_engine_reset(engine);

	return -EIO;
}
typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);

static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return gen8_reset_engines;
	else if (INTEL_INFO(dev_priv)->gen >= 6)
		return gen6_reset_engines;
	else if (IS_GEN5(dev_priv))
		return ironlake_do_reset;
	else if (IS_G4X(dev_priv))
		return g4x_do_reset;
	else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		return g33_do_reset;
	else if (INTEL_INFO(dev_priv)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}
int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	reset_func reset;
	int ret;

	reset = intel_get_gpu_reset(dev_priv);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never complete (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev_priv, engine_mask);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
	return intel_get_gpu_reset(dev_priv) != NULL;
}
int intel_guc_reset(struct drm_i915_private *dev_priv)
{
	int ret;
	unsigned long irqflags;

	if (!HAS_GUC(dev_priv))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}
bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}
static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
				i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv)) {
		fw_domains = __fwtable_reg_read_fw_domains(offset);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(offset);
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
				 i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
		fw_domains = __fwtable_reg_write_fw_domains(offset);
	} else if (IS_GEN8(dev_priv)) {
		fw_domains = __gen8_reg_write_fw_domains(offset);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}
/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 * 				    a register
 * @dev_priv: pointer to struct drm_i915_private
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains that must be taken with, for example,
 * intel_uncore_forcewake_get() for the specified register to be accessible in
 * the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER)
 * requires callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (intel_vgpu_active(dev_priv))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);

	return fw_domains;
}