2 * Copyright © 2008-2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
25 * Zou Nan hai <nanhai.zou@intel.com>
26 * Xiang Hai hao <haihao.xiang@intel.com>
30 #include <linux/log2.h>
33 #include <drm/i915_drm.h>
34 #include "i915_trace.h"
35 #include "intel_drv.h"
37 /* Rough estimate of the typical request size, performing a flush,
38 * set-context and then emitting the batch.
40 #define LEGACY_REQUEST_SIZE 200
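/* Returns the number of bytes available for command emission between the
 * CS read pointer (head) and our write pointer (tail), keeping
 * I915_RING_FREE_SPACE in reserve so that a completely full ring is never
 * mistaken for an empty one (head == tail always means "empty").
 */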
42 int __intel_ring_space(int head, int tail, int size)
44 int space = head - tail;
47 return space - I915_RING_FREE_SPACE;
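/* Refresh ring->space, first consuming any head advance recorded by
 * request retirement (last_retired_head) since we last looked.
 */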
50 void intel_ring_update_space(struct intel_ring *ring)
52 if (ring->last_retired_head != -1) {
53 ring->head = ring->last_retired_head;
54 ring->last_retired_head = -1;
57 ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
58 ring->tail, ring->size);
62 gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
64 struct intel_ring *ring = req->ring;
70 if (mode & EMIT_INVALIDATE)
73 ret = intel_ring_begin(req, 2);
77 intel_ring_emit(ring, cmd);
78 intel_ring_emit(ring, MI_NOOP);
79 intel_ring_advance(ring);
85 gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
87 struct intel_ring *ring = req->ring;
94 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
95 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
96 * also flushed at 2d versus 3d pipeline switches.
100 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
101 * MI_READ_FLUSH is set, and is always flushed on 965.
103 * I915_GEM_DOMAIN_COMMAND may not exist?
105 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
106 * invalidated when MI_EXE_FLUSH is set.
108 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
109 * invalidated with every MI_FLUSH.
113 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
114 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
115 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
116 * are flushed at any MI_FLUSH.
120 if (mode & EMIT_INVALIDATE) {
122 if (IS_G4X(req->i915) || IS_GEN5(req->i915))
123 cmd |= MI_INVALIDATE_ISP;
126 ret = intel_ring_begin(req, 2);
130 intel_ring_emit(ring, cmd);
131 intel_ring_emit(ring, MI_NOOP);
132 intel_ring_advance(ring);
138 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
139 * implementing two workarounds on gen6. From section 1.4.7.1
140 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
142 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
143 * produced by non-pipelined state commands), software needs to first
144 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
147 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
148 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
150 * And the workaround for these two requires this workaround first:
152 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
153 * BEFORE the pipe-control with a post-sync op and no write-cache
156 * And this last workaround is tricky because of the requirements on
157 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
160 * "1 of the following must also be set:
161 * - Render Target Cache Flush Enable ([12] of DW1)
162 * - Depth Cache Flush Enable ([0] of DW1)
163 * - Stall at Pixel Scoreboard ([1] of DW1)
164 * - Depth Stall ([13] of DW1)
165 * - Post-Sync Operation ([13] of DW1)
166 * - Notify Enable ([8] of DW1)"
168 * The cache flushes require the workaround flush that triggered this
169 * one, so we can't use it. Depth stall would trigger the same.
170 * Post-sync nonzero is what triggered this second workaround, so we
171 * can't use that one either. Notify enable is IRQs, which aren't
172 * really our business. That leaves only stall at scoreboard.
175 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
177 struct intel_ring *ring = req->ring;
179 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
182 ret = intel_ring_begin(req, 6);
186 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
187 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
188 PIPE_CONTROL_STALL_AT_SCOREBOARD);
189 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
190 intel_ring_emit(ring, 0); /* low dword */
191 intel_ring_emit(ring, 0); /* high dword */
192 intel_ring_emit(ring, MI_NOOP);
193 intel_ring_advance(ring);
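/* Second half of the w/a pair described above: the post-sync (QW write)
 * flush to the scratch address.
 */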
195 ret = intel_ring_begin(req, 6);
199 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
200 intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
201 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
202 intel_ring_emit(ring, 0);
203 intel_ring_emit(ring, 0);
204 intel_ring_emit(ring, MI_NOOP);
205 intel_ring_advance(ring);
211 gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
213 struct intel_ring *ring = req->ring;
215 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
219 /* Force SNB workarounds for PIPE_CONTROL flushes */
220 ret = intel_emit_post_sync_nonzero_flush(req);
224 /* Just flush everything. Experiments have shown that reducing the
225 * number of bits based on the write domains has little performance
228 if (mode & EMIT_FLUSH) {
229 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
230 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
232 * Ensure that any following seqno writes only happen
233 * when the render cache is indeed flushed.
235 flags |= PIPE_CONTROL_CS_STALL;
237 if (mode & EMIT_INVALIDATE) {
238 flags |= PIPE_CONTROL_TLB_INVALIDATE;
239 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
240 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
241 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
242 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
243 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
245 * TLB invalidate requires a post-sync write.
247 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
250 ret = intel_ring_begin(req, 4);
254 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
255 intel_ring_emit(ring, flags);
256 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
257 intel_ring_emit(ring, 0);
258 intel_ring_advance(ring);
264 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
266 struct intel_ring *ring = req->ring;
269 ret = intel_ring_begin(req, 4);
273 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
274 intel_ring_emit(ring,
275 PIPE_CONTROL_CS_STALL |
276 PIPE_CONTROL_STALL_AT_SCOREBOARD);
277 intel_ring_emit(ring, 0);
278 intel_ring_emit(ring, 0);
279 intel_ring_advance(ring);
285 gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
287 struct intel_ring *ring = req->ring;
289 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
294 * Ensure that any following seqno writes only happen when the render
295 * cache is indeed flushed.
297 * Workaround: 4th PIPE_CONTROL command (except the ones with only
298 * read-cache invalidate bits set) must have the CS_STALL bit set. We
299 * don't try to be clever and just set it unconditionally.
301 flags |= PIPE_CONTROL_CS_STALL;
303 /* Just flush everything. Experiments have shown that reducing the
304 * number of bits based on the write domains has little performance
307 if (mode & EMIT_FLUSH) {
308 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
309 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
310 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
311 flags |= PIPE_CONTROL_FLUSH_ENABLE;
313 if (mode & EMIT_INVALIDATE) {
314 flags |= PIPE_CONTROL_TLB_INVALIDATE;
315 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
316 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
317 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
318 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
319 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
320 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
322 * TLB invalidate requires a post-sync write.
324 flags |= PIPE_CONTROL_QW_WRITE;
325 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
327 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
329 /* Workaround: we must issue a pipe_control with CS-stall bit
330 * set before a pipe_control command that has the state cache
331 * invalidate bit set. */
332 gen7_render_ring_cs_stall_wa(req);
335 ret = intel_ring_begin(req, 4);
339 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
340 intel_ring_emit(ring, flags);
341 intel_ring_emit(ring, scratch_addr);
342 intel_ring_emit(ring, 0);
343 intel_ring_advance(ring);
349 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
350 u32 flags, u32 scratch_addr)
352 struct intel_ring *ring = req->ring;
355 ret = intel_ring_begin(req, 6);
359 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
360 intel_ring_emit(ring, flags);
361 intel_ring_emit(ring, scratch_addr);
362 intel_ring_emit(ring, 0);
363 intel_ring_emit(ring, 0);
364 intel_ring_emit(ring, 0);
365 intel_ring_advance(ring);
371 gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
374 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
378 flags |= PIPE_CONTROL_CS_STALL;
380 if (mode & EMIT_FLUSH) {
381 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
382 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
383 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
384 flags |= PIPE_CONTROL_FLUSH_ENABLE;
386 if (mode & EMIT_INVALIDATE) {
387 flags |= PIPE_CONTROL_TLB_INVALIDATE;
388 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
389 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
390 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
391 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
392 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
393 flags |= PIPE_CONTROL_QW_WRITE;
394 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
396 /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
397 ret = gen8_emit_pipe_control(req,
398 PIPE_CONTROL_CS_STALL |
399 PIPE_CONTROL_STALL_AT_SCOREBOARD,
405 return gen8_emit_pipe_control(req, flags, scratch_addr);
408 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
410 struct drm_i915_private *dev_priv = engine->i915;
413 addr = dev_priv->status_page_dmah->busaddr;
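/* On gen4+ fold bits [35:32] of the DMA address into bits [7:4] of
 * HWS_PGA (the page is 4KiB aligned, so those low bits are otherwise
 * unused), presumably to allow a status page above 4GiB.
 */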
414 if (INTEL_GEN(dev_priv) >= 4)
415 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
416 I915_WRITE(HWS_PGA, addr);
419 static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
421 struct drm_i915_private *dev_priv = engine->i915;
424 /* The ring status page addresses are no longer next to the rest of
425 * the ring registers as of gen7.
427 if (IS_GEN7(dev_priv)) {
428 switch (engine->id) {
430 mmio = RENDER_HWS_PGA_GEN7;
433 mmio = BLT_HWS_PGA_GEN7;
436 * VCS2 doesn't actually exist on Gen7; this case only
437 * silences the gcc switch-coverage warning.
441 mmio = BSD_HWS_PGA_GEN7;
444 mmio = VEBOX_HWS_PGA_GEN7;
447 } else if (IS_GEN6(dev_priv)) {
448 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
450 /* XXX: gen8 returns to sanity */
451 mmio = RING_HWS_PGA(engine->mmio_base);
454 I915_WRITE(mmio, engine->status_page.ggtt_offset);
458 * Flush the TLB for this page
460 * FIXME: These two bits have disappeared on gen8, so a question
461 * arises: do we still need this and if so how should we go about
462 * invalidating the TLB?
464 if (IS_GEN(dev_priv, 6, 7)) {
465 i915_reg_t reg = RING_INSTPM(engine->mmio_base);
467 /* ring should be idle before issuing a sync flush */
468 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
471 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
473 if (intel_wait_for_register(dev_priv,
474 reg, INSTPM_SYNC_FLUSH, 0,
476 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
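/* Ask the CS to stop processing (STOP_RING in MI_MODE, gen3+) and wait
 * for it to idle, then zero the ring registers; returns true once HEAD
 * reads back as zero.
 */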
481 static bool stop_ring(struct intel_engine_cs *engine)
483 struct drm_i915_private *dev_priv = engine->i915;
485 if (INTEL_GEN(dev_priv) > 2) {
486 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
487 if (intel_wait_for_register(dev_priv,
488 RING_MI_MODE(engine->mmio_base),
492 DRM_ERROR("%s : timed out trying to stop ring\n",
494 /* Sometimes we observe that the idle flag is not
495 * set even though the ring is empty. So double
496 * check before giving up.
498 if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
503 I915_WRITE_CTL(engine, 0);
504 I915_WRITE_HEAD(engine, 0);
505 I915_WRITE_TAIL(engine, 0);
507 if (INTEL_GEN(dev_priv) > 2) {
508 (void)I915_READ_CTL(engine);
509 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
512 return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
515 static int init_ring_common(struct intel_engine_cs *engine)
517 struct drm_i915_private *dev_priv = engine->i915;
518 struct intel_ring *ring = engine->buffer;
521 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
523 if (!stop_ring(engine)) {
524 /* G45 ring initialization often fails to reset head to zero */
525 DRM_DEBUG_KMS("%s head not reset to zero "
526 "ctl %08x head %08x tail %08x start %08x\n",
528 I915_READ_CTL(engine),
529 I915_READ_HEAD(engine),
530 I915_READ_TAIL(engine),
531 I915_READ_START(engine));
533 if (!stop_ring(engine)) {
534 DRM_ERROR("failed to set %s head to zero "
535 "ctl %08x head %08x tail %08x start %08x\n",
537 I915_READ_CTL(engine),
538 I915_READ_HEAD(engine),
539 I915_READ_TAIL(engine),
540 I915_READ_START(engine));
546 if (HWS_NEEDS_PHYSICAL(dev_priv))
547 ring_setup_phys_status_page(engine);
549 intel_ring_setup_status_page(engine);
551 intel_engine_reset_breadcrumbs(engine);
553 /* Enforce ordering by reading HEAD register back */
554 I915_READ_HEAD(engine);
556 /* Initialize the ring. This must happen _after_ we've cleared the ring
557 * registers with the above sequence (the readback of the HEAD registers
558 * also enforces ordering), otherwise the hw might lose the new ring
559 * register values. */
560 I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
562 /* WaClearRingBufHeadRegAtInit:ctg,elk */
563 if (I915_READ_HEAD(engine))
564 DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
565 engine->name, I915_READ_HEAD(engine));
567 intel_ring_update_space(ring);
568 I915_WRITE_HEAD(engine, ring->head);
569 I915_WRITE_TAIL(engine, ring->tail);
570 (void)I915_READ_TAIL(engine);
572 I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
574 /* If the head is still not zero, the ring is dead */
575 if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
576 RING_VALID, RING_VALID,
578 DRM_ERROR("%s initialization failed "
579 "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
581 I915_READ_CTL(engine),
582 I915_READ_CTL(engine) & RING_VALID,
583 I915_READ_HEAD(engine), ring->head,
584 I915_READ_TAIL(engine), ring->tail,
585 I915_READ_START(engine),
586 i915_ggtt_offset(ring->vma));
591 intel_engine_init_hangcheck(engine);
594 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
599 static void reset_ring_common(struct intel_engine_cs *engine,
600 struct drm_i915_gem_request *request)
602 struct intel_ring *ring = request->ring;
604 ring->head = request->postfix;
605 ring->last_retired_head = -1;
608 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
610 struct intel_ring *ring = req->ring;
611 struct i915_workarounds *w = &req->i915->workarounds;
617 ret = req->engine->emit_flush(req, EMIT_BARRIER);
621 ret = intel_ring_begin(req, (w->count * 2 + 2));
625 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
626 for (i = 0; i < w->count; i++) {
627 intel_ring_emit_reg(ring, w->reg[i].addr);
628 intel_ring_emit(ring, w->reg[i].value);
630 intel_ring_emit(ring, MI_NOOP);
632 intel_ring_advance(ring);
634 ret = req->engine->emit_flush(req, EMIT_BARRIER);
638 DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
643 static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
647 ret = intel_ring_workarounds_emit(req);
651 ret = i915_gem_render_state_emit(req);
658 static int wa_add(struct drm_i915_private *dev_priv,
660 const u32 mask, const u32 val)
662 const u32 idx = dev_priv->workarounds.count;
664 if (WARN_ON(idx >= I915_MAX_WA_REGS))
667 dev_priv->workarounds.reg[idx].addr = addr;
668 dev_priv->workarounds.reg[idx].value = val;
669 dev_priv->workarounds.reg[idx].mask = mask;
671 dev_priv->workarounds.count++;
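/* Helpers for building the workaround list: the *_MASKED variants write
 * masked-bit registers (the enable mask lives in the upper 16 bits of the
 * value), while WA_SET_BIT/WA_CLR_BIT/WA_WRITE perform a read-modify-write
 * or plain write of the register.
 */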
676 #define WA_REG(addr, mask, val) do { \
677 const int r = wa_add(dev_priv, (addr), (mask), (val)); \
682 #define WA_SET_BIT_MASKED(addr, mask) \
683 WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
685 #define WA_CLR_BIT_MASKED(addr, mask) \
686 WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
688 #define WA_SET_FIELD_MASKED(addr, mask, value) \
689 WA_REG(addr, mask, _MASKED_FIELD(mask, value))
691 #define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
692 #define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
694 #define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
696 static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
699 struct drm_i915_private *dev_priv = engine->i915;
700 struct i915_workarounds *wa = &dev_priv->workarounds;
701 const uint32_t index = wa->hw_whitelist_count[engine->id];
703 if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
706 WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
707 i915_mmio_reg_offset(reg));
708 wa->hw_whitelist_count[engine->id]++;
713 static int gen8_init_workarounds(struct intel_engine_cs *engine)
715 struct drm_i915_private *dev_priv = engine->i915;
717 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
719 /* WaDisableAsyncFlipPerfMode:bdw,chv */
720 WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
722 /* WaDisablePartialInstShootdown:bdw,chv */
723 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
724 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
726 /* Use Force Non-Coherent whenever executing a 3D context. This is a
727 * workaround for a possible hang in the unlikely event a TLB
728 * invalidation occurs during a PSD flush.
730 /* WaForceEnableNonCoherent:bdw,chv */
731 /* WaHdcDisableFetchWhenMasked:bdw,chv */
732 WA_SET_BIT_MASKED(HDC_CHICKEN0,
733 HDC_DONOT_FETCH_MEM_WHEN_MASKED |
734 HDC_FORCE_NON_COHERENT);
736 /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
737 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
738 * polygons in the same 8x4 pixel/sample area to be processed without
739 * stalling waiting for the earlier ones to write to Hierarchical Z
742 * This optimization is off by default for BDW and CHV; turn it on.
744 WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
746 /* Wa4x4STCOptimizationDisable:bdw,chv */
747 WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
750 * BSpec recommends 8x4 when MSAA is used,
751 * however in practice 16x4 seems fastest.
753 * Note that PS/WM thread counts depend on the WIZ hashing
754 * disable bit, which we don't touch here, but it's good
755 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
757 WA_SET_FIELD_MASKED(GEN7_GT_MODE,
758 GEN6_WIZ_HASHING_MASK,
759 GEN6_WIZ_HASHING_16x4);
764 static int bdw_init_workarounds(struct intel_engine_cs *engine)
766 struct drm_i915_private *dev_priv = engine->i915;
769 ret = gen8_init_workarounds(engine);
773 /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
774 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
776 /* WaDisableDopClockGating:bdw */
777 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
778 DOP_CLOCK_GATING_DISABLE);
780 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
781 GEN8_SAMPLER_POWER_BYPASS_DIS);
783 WA_SET_BIT_MASKED(HDC_CHICKEN0,
784 /* WaForceContextSaveRestoreNonCoherent:bdw */
785 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
786 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
787 (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
792 static int chv_init_workarounds(struct intel_engine_cs *engine)
794 struct drm_i915_private *dev_priv = engine->i915;
797 ret = gen8_init_workarounds(engine);
801 /* WaDisableThreadStallDopClockGating:chv */
802 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
804 /* Improve HiZ throughput on CHV. */
805 WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
810 static int gen9_init_workarounds(struct intel_engine_cs *engine)
812 struct drm_i915_private *dev_priv = engine->i915;
815 /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
816 I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
818 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
819 I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
820 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
822 /* WaDisableKillLogic:bxt,skl,kbl */
823 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
826 /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
827 /* WaDisablePartialInstShootdown:skl,bxt,kbl */
828 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
829 FLOW_CONTROL_ENABLE |
830 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
832 /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
833 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
834 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
836 /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
837 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
838 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
839 GEN9_DG_MIRROR_FIX_ENABLE);
841 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
842 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
843 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
844 GEN9_RHWO_OPTIMIZATION_DISABLE);
846 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set,
847 * but we do that in the per-ctx batchbuffer as there is an issue
848 * with this register not getting restored on ctx restore
852 /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
853 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
854 GEN9_ENABLE_GPGPU_PREEMPTION);
856 /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
857 /* WaDisablePartialResolveInVc:skl,bxt,kbl */
858 WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
859 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
861 /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
862 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
863 GEN9_CCS_TLB_PREFETCH_ENABLE);
865 /* WaDisableMaskBasedCammingInRCC:bxt */
866 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
867 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
868 PIXEL_MASK_CAMMING_DISABLE);
870 /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
871 WA_SET_BIT_MASKED(HDC_CHICKEN0,
872 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
873 HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
875 /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
876 * both tied to WaForceContextSaveRestoreNonCoherent
877 * in some hsds for skl. We keep the tie for all gen9. The
878 * documentation is a bit hazy and so we want to get common behaviour,
879 * even though there is no clear evidence we would need both on kbl/bxt.
880 * This area has been a source of system hangs so we play it safe
881 * and mimic the skl regardless of what bspec says.
883 * Use Force Non-Coherent whenever executing a 3D context. This
884 * is a workaround for a possible hang in the unlikely event
885 * a TLB invalidation occurs during a PSD flush.
888 /* WaForceEnableNonCoherent:skl,bxt,kbl */
889 WA_SET_BIT_MASKED(HDC_CHICKEN0,
890 HDC_FORCE_NON_COHERENT);
892 /* WaDisableHDCInvalidation:skl,bxt,kbl */
893 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
894 BDW_DISABLE_HDC_INVALIDATION);
896 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
897 if (IS_SKYLAKE(dev_priv) ||
898 IS_KABYLAKE(dev_priv) ||
899 IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
900 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
901 GEN8_SAMPLER_POWER_BYPASS_DIS);
903 /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
904 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
906 /* WaOCLCoherentLineFlush:skl,bxt,kbl */
907 I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
908 GEN8_LQSC_FLUSH_COHERENT_LINES));
910 /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
911 ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
915 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
916 ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
920 /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
921 ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
928 static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
930 struct drm_i915_private *dev_priv = engine->i915;
931 u8 vals[3] = { 0, 0, 0 };
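/* vals[i] holds the IZ hashing override for slice i, chosen according to
 * which single subslice has only 7 EUs; leaving it 0 keeps the default
 * hashing for that slice.
 */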
934 for (i = 0; i < 3; i++) {
938 * Only consider slices where one, and only one, subslice has 7
941 if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
945 * subslice_7eu[i] != 0 (because of the check above) and
946 * ss_max == 4 (maximum number of subslices possible per slice)
950 ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
954 if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
957 /* Tune IZ hashing. See intel_device_info_runtime_init() */
958 WA_SET_FIELD_MASKED(GEN7_GT_MODE,
959 GEN9_IZ_HASHING_MASK(2) |
960 GEN9_IZ_HASHING_MASK(1) |
961 GEN9_IZ_HASHING_MASK(0),
962 GEN9_IZ_HASHING(2, vals[2]) |
963 GEN9_IZ_HASHING(1, vals[1]) |
964 GEN9_IZ_HASHING(0, vals[0]));
969 static int skl_init_workarounds(struct intel_engine_cs *engine)
971 struct drm_i915_private *dev_priv = engine->i915;
974 ret = gen9_init_workarounds(engine);
979 * The actual WA is to disable per-context preemption granularity control
980 * until D0, which is the default case, so this is equivalent to
981 * !WaDisablePerCtxtPreemptionGranularityControl:skl
983 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
984 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
986 /* WaEnableGapsTsvCreditFix:skl */
987 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
988 GEN9_GAPS_TSV_CREDIT_DISABLE));
990 /* WaDisableGafsUnitClkGating:skl */
991 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
993 /* WaInPlaceDecompressionHang:skl */
994 if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
995 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
996 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
998 /* WaDisableLSQCROPERFforOCL:skl */
999 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1003 return skl_tune_iz_hashing(engine);
1006 static int bxt_init_workarounds(struct intel_engine_cs *engine)
1008 struct drm_i915_private *dev_priv = engine->i915;
1011 ret = gen9_init_workarounds(engine);
1015 /* WaStoreMultiplePTEenable:bxt */
1016 /* This is a requirement according to Hardware specification */
1017 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
1018 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
1020 /* WaSetClckGatingDisableMedia:bxt */
1021 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
1022 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1023 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
1026 /* WaDisableThreadStallDopClockGating:bxt */
1027 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
1028 STALL_DOP_GATING_DISABLE);
1030 /* WaDisablePooledEuLoadBalancingFix:bxt */
1031 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
1032 WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
1033 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
1036 /* WaDisableSbeCacheDispatchPortSharing:bxt */
1037 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
1039 GEN7_HALF_SLICE_CHICKEN1,
1040 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1043 /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
1044 /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
1045 /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
1046 /* WaDisableLSQCROPERFforOCL:bxt */
1047 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
1048 ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
1052 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1057 /* WaProgramL3SqcReg1DefaultForPerf:bxt */
1058 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
1059 I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
1060 L3_HIGH_PRIO_CREDITS(2));
1062 /* WaToEnableHwFixForPushConstHWBug:bxt */
1063 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
1064 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1065 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1067 /* WaInPlaceDecompressionHang:bxt */
1068 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
1069 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1070 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1075 static int kbl_init_workarounds(struct intel_engine_cs *engine)
1077 struct drm_i915_private *dev_priv = engine->i915;
1080 ret = gen9_init_workarounds(engine);
1084 /* WaEnableGapsTsvCreditFix:kbl */
1085 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1086 GEN9_GAPS_TSV_CREDIT_DISABLE));
1088 /* WaDisableDynamicCreditSharing:kbl */
1089 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
1090 WA_SET_BIT(GAMT_CHKN_BIT_REG,
1091 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
1093 /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
1094 if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
1095 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1096 HDC_FENCE_DEST_SLM_DISABLE);
1098 /* GEN8_L3SQCREG4 has a dependency on the WA batch, so any new changes
1099 * involving this register should also be added to the WA batch as required.
1101 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
1102 /* WaDisableLSQCROPERFforOCL:kbl */
1103 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1104 GEN8_LQSC_RO_PERF_DIS);
1106 /* WaToEnableHwFixForPushConstHWBug:kbl */
1107 if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
1108 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1109 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1111 /* WaDisableGafsUnitClkGating:kbl */
1112 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1114 /* WaDisableSbeCacheDispatchPortSharing:kbl */
1116 GEN7_HALF_SLICE_CHICKEN1,
1117 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1119 /* WaInPlaceDecompressionHang:kbl */
1120 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1121 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1123 /* WaDisableLSQCROPERFforOCL:kbl */
1124 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1131 int init_workarounds_ring(struct intel_engine_cs *engine)
1133 struct drm_i915_private *dev_priv = engine->i915;
1135 WARN_ON(engine->id != RCS);
1137 dev_priv->workarounds.count = 0;
1138 dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
1140 if (IS_BROADWELL(dev_priv))
1141 return bdw_init_workarounds(engine);
1143 if (IS_CHERRYVIEW(dev_priv))
1144 return chv_init_workarounds(engine);
1146 if (IS_SKYLAKE(dev_priv))
1147 return skl_init_workarounds(engine);
1149 if (IS_BROXTON(dev_priv))
1150 return bxt_init_workarounds(engine);
1152 if (IS_KABYLAKE(dev_priv))
1153 return kbl_init_workarounds(engine);
1158 static int init_render_ring(struct intel_engine_cs *engine)
1160 struct drm_i915_private *dev_priv = engine->i915;
1161 int ret = init_ring_common(engine);
1165 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
1166 if (IS_GEN(dev_priv, 4, 6))
1167 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
1169 /* We need to disable the AsyncFlip performance optimisations in order
1170 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1171 * programmed to '1' on all products.
1173 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
1175 if (IS_GEN(dev_priv, 6, 7))
1176 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1178 /* Required for the hardware to program scanline values for waiting */
1179 /* WaEnableFlushTlbInvalidationMode:snb */
1180 if (IS_GEN6(dev_priv))
1181 I915_WRITE(GFX_MODE,
1182 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
1184 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
1185 if (IS_GEN7(dev_priv))
1186 I915_WRITE(GFX_MODE_GEN7,
1187 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
1188 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
1190 if (IS_GEN6(dev_priv)) {
1191 /* From the Sandybridge PRM, volume 1 part 3, page 24:
1192 * "If this bit is set, STCunit will have LRA as replacement
1193 * policy. [...] This bit must be reset. LRA replacement
1194 * policy is not supported."
1196 I915_WRITE(CACHE_MODE_0,
1197 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
1200 if (IS_GEN(dev_priv, 6, 7))
1201 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1203 if (INTEL_INFO(dev_priv)->gen >= 6)
1204 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1206 return init_workarounds_ring(engine);
1209 static void render_ring_cleanup(struct intel_engine_cs *engine)
1211 struct drm_i915_private *dev_priv = engine->i915;
1213 i915_vma_unpin_and_release(&dev_priv->semaphore);
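/* gen8 semaphore signalling: write the request's global seqno into every
 * other engine's wait slot (via a PIPE_CONTROL QW write on the render
 * ring, MI_FLUSH_DW on the others) and follow it with MI_SEMAPHORE_SIGNAL
 * targeting that engine.
 */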
1216 static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
1218 struct drm_i915_private *dev_priv = req->i915;
1219 struct intel_engine_cs *waiter;
1220 enum intel_engine_id id;
1222 for_each_engine(waiter, dev_priv, id) {
1223 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
1224 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1227 *out++ = GFX_OP_PIPE_CONTROL(6);
1228 *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
1229 PIPE_CONTROL_QW_WRITE |
1230 PIPE_CONTROL_CS_STALL);
1231 *out++ = lower_32_bits(gtt_offset);
1232 *out++ = upper_32_bits(gtt_offset);
1233 *out++ = req->global_seqno;
1235 *out++ = (MI_SEMAPHORE_SIGNAL |
1236 MI_SEMAPHORE_TARGET(waiter->hw_id));
1243 static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
1245 struct drm_i915_private *dev_priv = req->i915;
1246 struct intel_engine_cs *waiter;
1247 enum intel_engine_id id;
1249 for_each_engine(waiter, dev_priv, id) {
1250 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
1251 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1254 *out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
1255 *out++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
1256 *out++ = upper_32_bits(gtt_offset);
1257 *out++ = req->global_seqno;
1258 *out++ = (MI_SEMAPHORE_SIGNAL |
1259 MI_SEMAPHORE_TARGET(waiter->hw_id));
1266 static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
1268 struct drm_i915_private *dev_priv = req->i915;
1269 struct intel_engine_cs *engine;
1270 enum intel_engine_id id;
1273 for_each_engine(engine, dev_priv, id) {
1274 i915_reg_t mbox_reg;
1276 if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
1279 mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
1280 if (i915_mmio_reg_valid(mbox_reg)) {
1281 *out++ = MI_LOAD_REGISTER_IMM(1);
1282 *out++ = i915_mmio_reg_offset(mbox_reg);
1283 *out++ = req->global_seqno;
1293 static void i9xx_submit_request(struct drm_i915_gem_request *request)
1295 struct drm_i915_private *dev_priv = request->i915;
1297 I915_WRITE_TAIL(request->engine, request->tail);
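/* Write the request's global seqno into the hardware status page and raise
 * a user interrupt; i9xx_emit_breadcrumb_sz must match the number of
 * dwords emitted here.
 */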
1300 static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req,
1303 *out++ = MI_STORE_DWORD_INDEX;
1304 *out++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
1305 *out++ = req->global_seqno;
1306 *out++ = MI_USER_INTERRUPT;
1308 req->tail = intel_ring_offset(req->ring, out);
1311 static const int i9xx_emit_breadcrumb_sz = 4;
1314 * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
1316 * @req - request to write to the ring
1318 * Update the mailbox registers in the *other* rings with the current seqno.
1319 * This acts like a signal in the canonical semaphore.
1321 static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req,
1324 return i9xx_emit_breadcrumb(req,
1325 req->engine->semaphore.signal(req, out));
1328 static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
1331 struct intel_engine_cs *engine = req->engine;
1333 if (engine->semaphore.signal)
1334 out = engine->semaphore.signal(req, out);
1336 *out++ = GFX_OP_PIPE_CONTROL(6);
1337 *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
1338 PIPE_CONTROL_CS_STALL |
1339 PIPE_CONTROL_QW_WRITE);
1340 *out++ = intel_hws_seqno_address(engine);
1342 *out++ = req->global_seqno;
1343 /* We're thrashing one dword of HWS. */
1345 *out++ = MI_USER_INTERRUPT;
1348 req->tail = intel_ring_offset(req->ring, out);
1351 static const int gen8_render_emit_breadcrumb_sz = 8;
1354 * gen8_ring_sync_to - sync the waiter to the signaller on seqno
1356 * @req - request that must wait for @signal
1357 * @signal - request which has, or will have, signalled
1362 gen8_ring_sync_to(struct drm_i915_gem_request *req,
1363 struct drm_i915_gem_request *signal)
1365 struct intel_ring *ring = req->ring;
1366 struct drm_i915_private *dev_priv = req->i915;
1367 u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
1368 struct i915_hw_ppgtt *ppgtt;
1371 ret = intel_ring_begin(req, 4);
1375 intel_ring_emit(ring,
1377 MI_SEMAPHORE_GLOBAL_GTT |
1378 MI_SEMAPHORE_SAD_GTE_SDD);
1379 intel_ring_emit(ring, signal->global_seqno);
1380 intel_ring_emit(ring, lower_32_bits(offset));
1381 intel_ring_emit(ring, upper_32_bits(offset));
1382 intel_ring_advance(ring);
1384 /* When the !RCS engines idle waiting upon a semaphore, they lose their
1385 * pagetables and we must reload them before executing the batch.
1386 * We do this on the i915_switch_context() following the wait and
1387 * before the dispatch.
1389 ppgtt = req->ctx->ppgtt;
1390 if (ppgtt && req->engine->id != RCS)
1391 ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
1396 gen6_ring_sync_to(struct drm_i915_gem_request *req,
1397 struct drm_i915_gem_request *signal)
1399 struct intel_ring *ring = req->ring;
1400 u32 dw1 = MI_SEMAPHORE_MBOX |
1401 MI_SEMAPHORE_COMPARE |
1402 MI_SEMAPHORE_REGISTER;
1403 u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
1406 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
1408 ret = intel_ring_begin(req, 4);
1412 intel_ring_emit(ring, dw1 | wait_mbox);
1413 /* Throughout all of the GEM code, seqno passed implies our current
1414 * seqno is >= the last seqno executed. However for hardware the
1415 * comparison is strictly greater than.
1417 intel_ring_emit(ring, signal->global_seqno - 1);
1418 intel_ring_emit(ring, 0);
1419 intel_ring_emit(ring, MI_NOOP);
1420 intel_ring_advance(ring);
1426 gen5_seqno_barrier(struct intel_engine_cs *engine)
1428 /* MI_STORE are internally buffered by the GPU and not flushed
1429 * either by MI_FLUSH or SyncFlush or any other combination of
1432 * "Only the submission of the store operation is guaranteed.
1433 * The write result will be complete (coherent) some time later
1434 * (this is practically a finite period but there is no guaranteed
1437 * Empirically, we observe that we need a delay of at least 75us to
1438 * be sure that the seqno write is visible by the CPU.
1440 usleep_range(125, 250);
1444 gen6_seqno_barrier(struct intel_engine_cs *engine)
1446 struct drm_i915_private *dev_priv = engine->i915;
1448 /* Workaround to force correct ordering between irq and seqno writes on
1449 * ivb (and maybe also on snb) by reading from a CS register (like
1450 * ACTHD) before reading the status page.
1452 * Note that this effectively stalls the read by the time it takes to
1453 * do a memory transaction, which more or less ensures that the write
1454 * from the GPU has sufficient time to invalidate the CPU cacheline.
1455 * Alternatively we could delay the interrupt from the CS ring to give
1456 * the write time to land, but that would incur a delay after every
1457 * batch i.e. much more frequent than a delay when waiting for the
1458 * interrupt (with the same net latency).
1460 * Also note that to prevent whole machine hangs on gen7, we have to
1461 * take the spinlock to guard against concurrent cacheline access.
1463 spin_lock_irq(&dev_priv->uncore.lock);
1464 POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
1465 spin_unlock_irq(&dev_priv->uncore.lock);
1469 gen5_irq_enable(struct intel_engine_cs *engine)
1471 gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
1475 gen5_irq_disable(struct intel_engine_cs *engine)
1477 gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
1481 i9xx_irq_enable(struct intel_engine_cs *engine)
1483 struct drm_i915_private *dev_priv = engine->i915;
1485 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1486 I915_WRITE(IMR, dev_priv->irq_mask);
1487 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1491 i9xx_irq_disable(struct intel_engine_cs *engine)
1493 struct drm_i915_private *dev_priv = engine->i915;
1495 dev_priv->irq_mask |= engine->irq_enable_mask;
1496 I915_WRITE(IMR, dev_priv->irq_mask);
1500 i8xx_irq_enable(struct intel_engine_cs *engine)
1502 struct drm_i915_private *dev_priv = engine->i915;
1504 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1505 I915_WRITE16(IMR, dev_priv->irq_mask);
1506 POSTING_READ16(RING_IMR(engine->mmio_base));
1510 i8xx_irq_disable(struct intel_engine_cs *engine)
1512 struct drm_i915_private *dev_priv = engine->i915;
1514 dev_priv->irq_mask |= engine->irq_enable_mask;
1515 I915_WRITE16(IMR, dev_priv->irq_mask);
1519 bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
1521 struct intel_ring *ring = req->ring;
1524 ret = intel_ring_begin(req, 2);
1528 intel_ring_emit(ring, MI_FLUSH);
1529 intel_ring_emit(ring, MI_NOOP);
1530 intel_ring_advance(ring);
1535 gen6_irq_enable(struct intel_engine_cs *engine)
1537 struct drm_i915_private *dev_priv = engine->i915;
1539 I915_WRITE_IMR(engine,
1540 ~(engine->irq_enable_mask |
1541 engine->irq_keep_mask));
1542 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
1546 gen6_irq_disable(struct intel_engine_cs *engine)
1548 struct drm_i915_private *dev_priv = engine->i915;
1550 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1551 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1555 hsw_vebox_irq_enable(struct intel_engine_cs *engine)
1557 struct drm_i915_private *dev_priv = engine->i915;
1559 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1560 gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
1564 hsw_vebox_irq_disable(struct intel_engine_cs *engine)
1566 struct drm_i915_private *dev_priv = engine->i915;
1568 I915_WRITE_IMR(engine, ~0);
1569 gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
1573 gen8_irq_enable(struct intel_engine_cs *engine)
1575 struct drm_i915_private *dev_priv = engine->i915;
1577 I915_WRITE_IMR(engine,
1578 ~(engine->irq_enable_mask |
1579 engine->irq_keep_mask));
1580 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1584 gen8_irq_disable(struct intel_engine_cs *engine)
1586 struct drm_i915_private *dev_priv = engine->i915;
1588 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1592 i965_emit_bb_start(struct drm_i915_gem_request *req,
1593 u64 offset, u32 length,
1594 unsigned int dispatch_flags)
1596 struct intel_ring *ring = req->ring;
1599 ret = intel_ring_begin(req, 2);
1603 intel_ring_emit(ring,
1604 MI_BATCH_BUFFER_START |
1606 (dispatch_flags & I915_DISPATCH_SECURE ?
1607 0 : MI_BATCH_NON_SECURE_I965));
1608 intel_ring_emit(ring, offset);
1609 intel_ring_advance(ring);
1614 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1615 #define I830_BATCH_LIMIT (256*1024)
1616 #define I830_TLB_ENTRIES (2)
1617 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
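/* The i830 CS has a TLB invalidation bug: unless the caller guarantees a
 * pinned batch, the batch (limited to I830_BATCH_LIMIT) is first blitted
 * into a stable scratch area and executed from there, see below.
 */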
1619 i830_emit_bb_start(struct drm_i915_gem_request *req,
1620 u64 offset, u32 len,
1621 unsigned int dispatch_flags)
1623 struct intel_ring *ring = req->ring;
1624 u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
1627 ret = intel_ring_begin(req, 6);
1631 /* Evict the invalid PTE TLBs */
1632 intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
1633 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
1634 intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
1635 intel_ring_emit(ring, cs_offset);
1636 intel_ring_emit(ring, 0xdeadbeef);
1637 intel_ring_emit(ring, MI_NOOP);
1638 intel_ring_advance(ring);
1640 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
1641 if (len > I830_BATCH_LIMIT)
1644 ret = intel_ring_begin(req, 6 + 2);
1648 /* Blit the batch (which now has all relocs applied) to the
1649 * stable batch scratch bo area (so that the CS never
1650 * stumbles over its tlb invalidation bug) ...
1652 intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
1653 intel_ring_emit(ring,
1654 BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
1655 intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
1656 intel_ring_emit(ring, cs_offset);
1657 intel_ring_emit(ring, 4096);
1658 intel_ring_emit(ring, offset);
1660 intel_ring_emit(ring, MI_FLUSH);
1661 intel_ring_emit(ring, MI_NOOP);
1662 intel_ring_advance(ring);
1664 /* ... and execute it. */
1668 ret = intel_ring_begin(req, 2);
1672 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1673 intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1674 0 : MI_BATCH_NON_SECURE));
1675 intel_ring_advance(ring);
1681 i915_emit_bb_start(struct drm_i915_gem_request *req,
1682 u64 offset, u32 len,
1683 unsigned int dispatch_flags)
1685 struct intel_ring *ring = req->ring;
1688 ret = intel_ring_begin(req, 2);
1692 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1693 intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1694 0 : MI_BATCH_NON_SECURE));
1695 intel_ring_advance(ring);
1700 static void cleanup_phys_status_page(struct intel_engine_cs *engine)
1702 struct drm_i915_private *dev_priv = engine->i915;
1704 if (!dev_priv->status_page_dmah)
1707 drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
1708 engine->status_page.page_addr = NULL;
1711 static void cleanup_status_page(struct intel_engine_cs *engine)
1713 struct i915_vma *vma;
1714 struct drm_i915_gem_object *obj;
1716 vma = fetch_and_zero(&engine->status_page.vma);
1722 i915_vma_unpin(vma);
1723 i915_vma_close(vma);
1725 i915_gem_object_unpin_map(obj);
1726 __i915_gem_object_release_unless_active(obj);
1729 static int init_status_page(struct intel_engine_cs *engine)
1731 struct drm_i915_gem_object *obj;
1732 struct i915_vma *vma;
1737 obj = i915_gem_object_create_internal(engine->i915, 4096);
1739 DRM_ERROR("Failed to allocate status page\n");
1740 return PTR_ERR(obj);
1743 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1747 vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
1754 if (!HAS_LLC(engine->i915))
1755 /* On g33, we cannot place HWS above 256MiB, so
1756 * restrict its pinning to the low mappable arena.
1757 * Though this restriction is not documented for
1758 * gen4, gen5, or byt, they also behave similarly
1759 * and hang if the HWS is placed at the top of the
1760 * GTT. To generalise, it appears that all !llc
1761 * platforms have issues with us placing the HWS
1762 * above the mappable region (even though we never
1765 flags |= PIN_MAPPABLE;
1766 ret = i915_vma_pin(vma, 0, 4096, flags);
1770 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
1771 if (IS_ERR(vaddr)) {
1772 ret = PTR_ERR(vaddr);
1776 engine->status_page.vma = vma;
1777 engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
1778 engine->status_page.page_addr = memset(vaddr, 0, 4096);
1780 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1781 engine->name, i915_ggtt_offset(vma));
1785 i915_vma_unpin(vma);
1787 i915_gem_object_put(obj);
1791 static int init_phys_status_page(struct intel_engine_cs *engine)
1793 struct drm_i915_private *dev_priv = engine->i915;
1795 dev_priv->status_page_dmah =
1796 drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
1797 if (!dev_priv->status_page_dmah)
1800 engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1801 memset(engine->status_page.page_addr, 0, PAGE_SIZE);
1806 int intel_ring_pin(struct intel_ring *ring)
1808 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
1809 unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096;
1810 enum i915_map_type map;
1811 struct i915_vma *vma = ring->vma;
1815 GEM_BUG_ON(ring->vaddr);
1817 map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;
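/* With an LLC the CPU's cached writes are coherent with the GPU, so a
 * write-back map is fine; without one, use write-combining so that ring
 * writes reach memory without explicit cache flushes.
 */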
1819 if (vma->obj->stolen)
1820 flags |= PIN_MAPPABLE;
1822 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1823 if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
1824 ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1826 ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
1831 ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
1835 if (i915_vma_is_map_and_fenceable(vma))
1836 addr = (void __force *)i915_vma_pin_iomap(vma);
1838 addr = i915_gem_object_pin_map(vma->obj, map);
1846 i915_vma_unpin(vma);
1847 return PTR_ERR(addr);
1850 void intel_ring_unpin(struct intel_ring *ring)
1852 GEM_BUG_ON(!ring->vma);
1853 GEM_BUG_ON(!ring->vaddr);
1855 if (i915_vma_is_map_and_fenceable(ring->vma))
1856 i915_vma_unpin_iomap(ring->vma);
1858 i915_gem_object_unpin_map(ring->vma->obj);
1861 i915_vma_unpin(ring->vma);
1864 static struct i915_vma *
1865 intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
1867 struct drm_i915_gem_object *obj;
1868 struct i915_vma *vma;
1870 obj = i915_gem_object_create_stolen(&dev_priv->drm, size);
1872 obj = i915_gem_object_create(&dev_priv->drm, size);
1874 return ERR_CAST(obj);
1876 /* mark ring buffers as read-only from GPU side by default */
1879 vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
1886 i915_gem_object_put(obj);
1891 intel_engine_create_ring(struct intel_engine_cs *engine, int size)
1893 struct intel_ring *ring;
1894 struct i915_vma *vma;
1896 GEM_BUG_ON(!is_power_of_2(size));
1897 GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
1899 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1901 return ERR_PTR(-ENOMEM);
1903 ring->engine = engine;
1905 INIT_LIST_HEAD(&ring->request_list);
1908 /* Workaround an erratum on the i830 which causes a hang if
1909 * the TAIL pointer points to within the last 2 cachelines
1912 ring->effective_size = size;
1913 if (IS_I830(engine->i915) || IS_845G(engine->i915))
1914 ring->effective_size -= 2 * CACHELINE_BYTES;
1916 ring->last_retired_head = -1;
1917 intel_ring_update_space(ring);
1919 vma = intel_ring_create_vma(engine->i915, size);
1922 return ERR_CAST(vma);
1930 intel_ring_free(struct intel_ring *ring)
1932 struct drm_i915_gem_object *obj = ring->vma->obj;
1934 i915_vma_close(ring->vma);
1935 __i915_gem_object_release_unless_active(obj);
1940 static int intel_ring_context_pin(struct i915_gem_context *ctx,
1941 struct intel_engine_cs *engine)
1943 struct intel_context *ce = &ctx->engine[engine->id];
1946 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1948 if (ce->pin_count++)
1952 ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false);
1956 ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
1957 PIN_GLOBAL | PIN_HIGH);
1962 /* The kernel context is only used as a placeholder for flushing the
1963 * active context. It is never used for submitting user rendering and
1964 * as such never requires the golden render context, and so we can skip
1965 * emitting it when we switch to the kernel context. This is required
1966 * as during eviction we cannot allocate and pin the renderstate in
1967 * order to initialise the context.
1969 if (ctx == ctx->i915->kernel_context)
1970 ce->initialised = true;
1972 i915_gem_context_get(ctx);
1980 static void intel_ring_context_unpin(struct i915_gem_context *ctx,
1981 struct intel_engine_cs *engine)
1983 struct intel_context *ce = &ctx->engine[engine->id];
1985 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1987 if (--ce->pin_count)
1991 i915_vma_unpin(ce->state);
1993 i915_gem_context_put(ctx);
1996 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
1998 struct drm_i915_private *dev_priv = engine->i915;
1999 struct intel_ring *ring;
2002 WARN_ON(engine->buffer);
2004 intel_engine_setup_common(engine);
2006 ret = intel_engine_init_common(engine);
2010 /* We may need to do things with the shrinker which
2011 * require us to immediately switch back to the default
2012 * context. This can cause a problem as pinning the
2013 * default context also requires GTT space which may not
2014 * be available. To avoid this we always pin the default
2017 ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
2021 ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
2023 ret = PTR_ERR(ring);
2027 if (HWS_NEEDS_PHYSICAL(dev_priv)) {
2028 WARN_ON(engine->id != RCS);
2029 ret = init_phys_status_page(engine);
2033 ret = init_status_page(engine);
2038 ret = intel_ring_pin(ring);
2040 intel_ring_free(ring);
2043 engine->buffer = ring;
2048 intel_engine_cleanup(engine);
2052 void intel_engine_cleanup(struct intel_engine_cs *engine)
2054 struct drm_i915_private *dev_priv;
2056 dev_priv = engine->i915;
2058 if (engine->buffer) {
2059 WARN_ON(INTEL_GEN(dev_priv) > 2 &&
2060 (I915_READ_MODE(engine) & MODE_IDLE) == 0);
2062 intel_ring_unpin(engine->buffer);
2063 intel_ring_free(engine->buffer);
2064 engine->buffer = NULL;
2067 if (engine->cleanup)
2068 engine->cleanup(engine);
2070 if (HWS_NEEDS_PHYSICAL(dev_priv)) {
2071 WARN_ON(engine->id != RCS);
2072 cleanup_phys_status_page(engine);
2074 cleanup_status_page(engine);
2077 intel_engine_cleanup_common(engine);
2079 intel_ring_context_unpin(dev_priv->kernel_context, engine);
2081 engine->i915 = NULL;
2082 dev_priv->engine[engine->id] = NULL;
2086 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
2088 struct intel_engine_cs *engine;
2089 enum intel_engine_id id;
2091 for_each_engine(engine, dev_priv, id) {
2092 engine->buffer->head = engine->buffer->tail;
2093 engine->buffer->last_retired_head = -1;
2097 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
2101 /* Flush enough space to reduce the likelihood of waiting after
2102 * we start building the request - in which case we will just
2103 * have to repeat work.
2105 request->reserved_space += LEGACY_REQUEST_SIZE;
2107 request->ring = request->engine->buffer;
2109 ret = intel_ring_begin(request, 0);
2113 request->reserved_space -= LEGACY_REQUEST_SIZE;
2117 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
2119 struct intel_ring *ring = req->ring;
2120 struct drm_i915_gem_request *target;
2123 lockdep_assert_held(&req->i915->drm.struct_mutex);
2125 intel_ring_update_space(ring);
2126 if (ring->space >= bytes)
2130 * Space is reserved in the ringbuffer for finalising the request,
2131 * as that cannot be allowed to fail. During request finalisation,
2132 * reserved_space is set to 0 to stop the overallocation and the
2133 * assumption is that then we never need to wait (which has the
2134 * risk of failing with EINTR).
2136 * See also i915_gem_request_alloc() and i915_add_request().
2138 GEM_BUG_ON(!req->reserved_space);
2140 list_for_each_entry(target, &ring->request_list, ring_link) {
2143 /* Would completion of this request free enough space? */
2144 space = __intel_ring_space(target->postfix, ring->tail,
2150 if (WARN_ON(&target->ring_link == &ring->request_list))
2153 timeout = i915_wait_request(target,
2154 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
2155 MAX_SCHEDULE_TIMEOUT);
2159 i915_gem_request_retire_upto(target);
2161 intel_ring_update_space(ring);
2162 GEM_BUG_ON(ring->space < bytes);
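/* Reserve ring space for num_dwords of command emission, waiting for old
 * requests to retire if the ring is full and padding the tail with
 * MI_NOOPs when the allocation would not fit contiguously before the end
 * of the buffer.
 */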
2166 int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
2168 struct intel_ring *ring = req->ring;
2169 int remain_actual = ring->size - ring->tail;
2170 int remain_usable = ring->effective_size - ring->tail;
2171 int bytes = num_dwords * sizeof(u32);
2172 int total_bytes, wait_bytes;
2173 bool need_wrap = false;
2175 total_bytes = bytes + req->reserved_space;
2177 if (unlikely(bytes > remain_usable)) {
2179 * Not enough space for the basic request. So need to flush
2180 * out the remainder and then wait for base + reserved.
2182 wait_bytes = remain_actual + total_bytes;
2184 } else if (unlikely(total_bytes > remain_usable)) {
2186 * The base request will fit but the reserved space
2187 * falls off the end. So we don't need an immediate wrap
2188 * and only need to effectively wait for the reserved
2189 * size of space from the start of the ringbuffer.
2191 wait_bytes = remain_actual + req->reserved_space;
2193 /* No wrapping required, just waiting. */
2194 wait_bytes = total_bytes;
2197 if (wait_bytes > ring->space) {
2198 int ret = wait_for_space(req, wait_bytes);
2203 if (unlikely(need_wrap)) {
2204 GEM_BUG_ON(remain_actual > ring->space);
2205 GEM_BUG_ON(ring->tail + remain_actual > ring->size);
2207 /* Fill the tail with MI_NOOP */
2208 memset(ring->vaddr + ring->tail, 0, remain_actual);
2210 ring->space -= remain_actual;
2213 ring->space -= bytes;
2214 GEM_BUG_ON(ring->space < 0);
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	int num_dwords =
		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
	int ret;

	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
	ret = intel_ring_begin(req, num_dwords);
	if (ret)
		return ret;

	while (num_dwords--)
		intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

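/*
 * The gen6 BSD ring must be fully awake (out of rc6) before its tail
 * pointer is written, so submission is wrapped in the PSMI sleep-message
 * sequence below.
 */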
static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_BSD_SLEEP_PSMI_CONTROL,
				       GEN6_BSD_SLEEP_INDICATOR,
				       0,
				       50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_GEN(req->i915) >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_GEN(req->i915) >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);
	return 0;
}

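/*
 * On gen8 the batch buffer start command takes a 64-bit graphics address,
 * emitted as separate low/high dwords, along with the PPGTT and resource
 * streamer selection bits.
 */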
static int
gen8_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	bool ppgtt = USES_PPGTT(req->i915) &&
			!(dispatch_flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
			(dispatch_flags & I915_DISPATCH_RS ?
			 MI_BATCH_RESOURCE_STREAMER : 0));
	intel_ring_emit(ring, lower_32_bits(offset));
	intel_ring_emit(ring, upper_32_bits(offset));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
hsw_emit_bb_start(struct drm_i915_gem_request *req,
		  u64 offset, u32 len,
		  unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
			(dispatch_flags & I915_DISPATCH_RS ?
			 MI_BATCH_RESOURCE_STREAMER : 0));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_NON_SECURE_I965));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	struct intel_ring *ring = req->ring;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_GEN(req->i915) >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring,
			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_GEN(req->i915) >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);

	return 0;
}

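/*
 * Set up inter-engine semaphores: a shared GGTT page of signal slots on
 * gen8+, or the per-engine mailbox registers on gen6/gen7.  If the backing
 * object cannot be allocated, semaphores are disabled entirely.
 */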
static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
				       struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	int ret, i;

	if (!i915.semaphores)
		return;

	if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
		struct i915_vma *vma;

		obj = i915_gem_object_create(&dev_priv->drm, 4096);
		if (IS_ERR(obj))
			goto err;

		vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
		if (IS_ERR(vma))
			goto err_obj;

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret)
			goto err_obj;

		ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
		if (ret)
			goto err_obj;

		dev_priv->semaphore = vma;
	}

	if (INTEL_GEN(dev_priv) >= 8) {
		u32 offset = i915_ggtt_offset(dev_priv->semaphore);

		engine->semaphore.sync_to = gen8_ring_sync_to;
		engine->semaphore.signal = gen8_xcs_signal;

		for (i = 0; i < I915_NUM_ENGINES; i++) {
			u32 ring_offset;

			if (i != engine->id)
				ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
			else
				ring_offset = MI_SEMAPHORE_SYNC_INVALID;

			engine->semaphore.signal_ggtt[i] = ring_offset;
		}
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->semaphore.sync_to = gen6_ring_sync_to;
		engine->semaphore.signal = gen6_signal;

		/*
		 * The current semaphore is only applied on pre-gen8
		 * platform. And there is no VCS2 ring on the pre-gen8
		 * platform. So the semaphore between RCS and VCS2 is
		 * initialized as INVALID. Gen8 will initialize the
		 * sema between VCS2 and RCS later.
		 */
		for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
			static const struct {
				u32 wait_mbox;
				i915_reg_t mbox_reg;
			} sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
				[RCS_HW] = {
					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
				},
				[VCS_HW] = {
					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
				},
				[BCS_HW] = {
					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
				},
				[VECS_HW] = {
					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
				},
			};
			u32 wait_mbox;
			i915_reg_t mbox_reg;

			if (i == engine->hw_id) {
				wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
				mbox_reg = GEN6_NOSYNC;
			} else {
				wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
				mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
			}

			engine->semaphore.mbox.wait[i] = wait_mbox;
			engine->semaphore.mbox.signal[i] = mbox_reg;
		}
	}

	return;

err_obj:
	i915_gem_object_put(obj);
err:
	DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
	i915.semaphores = 0;
}

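/* Select the interrupt enable/disable hooks (and seqno barrier) per gen. */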
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
				struct intel_engine_cs *engine)
{
	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;

	if (INTEL_GEN(dev_priv) >= 8) {
		engine->irq_enable = gen8_irq_enable;
		engine->irq_disable = gen8_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
		engine->irq_seqno_barrier = gen5_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		engine->irq_enable = i9xx_irq_enable;
		engine->irq_disable = i9xx_irq_disable;
	} else {
		engine->irq_enable = i8xx_irq_enable;
		engine->irq_disable = i8xx_irq_disable;
	}
}

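/*
 * Install the vfuncs shared by all legacy ringbuffer engines; the
 * per-engine init functions below then override the gen- and
 * engine-specific entry points.
 */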
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
				      struct intel_engine_cs *engine)
{
	intel_ring_init_irq(dev_priv, engine);
	intel_ring_init_semaphores(dev_priv, engine);

	engine->init_hw = init_ring_common;
	engine->reset_hw = reset_ring_common;

	engine->emit_breadcrumb = i9xx_emit_breadcrumb;
	engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
	if (i915.semaphores) {
		int num_rings;

		engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;

		num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
		if (INTEL_GEN(dev_priv) >= 8) {
			engine->emit_breadcrumb_sz += num_rings * 6;
		} else {
			engine->emit_breadcrumb_sz += num_rings * 3;
			if (num_rings & 1)
				engine->emit_breadcrumb_sz++;
		}
	}
	engine->submit_request = i9xx_submit_request;

	if (INTEL_GEN(dev_priv) >= 8)
		engine->emit_bb_start = gen8_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 4)
		engine->emit_bb_start = i965_emit_bb_start;
	else if (IS_I830(dev_priv) || IS_845G(dev_priv))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = i915_emit_bb_start;
}

int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (HAS_L3_DPF(dev_priv))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	if (INTEL_GEN(dev_priv) >= 8) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
		engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
		engine->emit_flush = gen8_render_ring_flush;
		if (i915.semaphores) {
			int num_rings;

			engine->semaphore.signal = gen8_rcs_signal;

			num_rings =
				hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
			engine->emit_breadcrumb_sz += num_rings * 6;
		}
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_flush = gen7_render_ring_flush;
		if (IS_GEN6(dev_priv))
			engine->emit_flush = gen6_render_ring_flush;
	} else if (IS_GEN5(dev_priv)) {
		engine->emit_flush = gen4_render_ring_flush;
	} else {
		if (INTEL_GEN(dev_priv) < 4)
			engine->emit_flush = gen2_render_ring_flush;
		else
			engine->emit_flush = gen4_render_ring_flush;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(dev_priv))
		engine->emit_bb_start = hsw_emit_bb_start;

	engine->init_hw = init_render_ring;
	engine->cleanup = render_ring_cleanup;

	ret = intel_init_ring_buffer(engine);
	if (ret)
		return ret;

	if (INTEL_GEN(dev_priv) >= 6) {
		ret = intel_engine_create_scratch(engine, 4096);
		if (ret)
			return ret;
	} else if (HAS_BROKEN_CS_TLB(dev_priv)) {
		ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}

int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev_priv))
			engine->submit_request = gen6_bsd_submit_request;
		engine->emit_flush = gen6_bsd_ring_flush;
		if (INTEL_GEN(dev_priv) < 8)
			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
	} else {
		engine->mmio_base = BSD_RING_BASE;
		engine->emit_flush = bsd_ring_flush;
		if (IS_GEN5(dev_priv))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}

	return intel_init_ring_buffer(engine);
}

/**
 * Initialize the second BSD ring (eg. Broadwell GT3, Skylake GT3)
 */
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);
	engine->emit_flush = gen6_bsd_ring_flush;

	return intel_init_ring_buffer(engine);
}

int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	if (INTEL_GEN(dev_priv) < 8)
		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	return intel_init_ring_buffer(engine);
}

int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;

	if (INTEL_GEN(dev_priv) < 8) {
		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
		engine->irq_enable = hsw_vebox_irq_enable;
		engine->irq_disable = hsw_vebox_irq_disable;
	}

	return intel_init_ring_buffer(engine);
}