2 * Copyright © 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Ben Widawsky <ben@bwidawsk.net>
25 * Michel Thierry <michel.thierry@intel.com>
26 * Thomas Daniel <thomas.daniel@intel.com>
27 * Oscar Mateo <oscar.mateo@intel.com>
32 * DOC: Logical Rings, Logical Ring Contexts and Execlists
35 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
36 * These expanded contexts enable a number of new abilities, especially
37 * "Execlists" (also implemented in this file).
39 * One of the main differences from the legacy HW contexts is that logical
40 * ring contexts incorporate many more things in the context's state, such as
41 * PDPs or ringbuffer control registers:
43 * The reason why PDPs are included in the context is straightforward: as
44 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
45 * contained there means you don't need to do a ppgtt->switch_mm yourself;
46 * instead, the GPU will do it for you on the context switch.
48 * But what about the ringbuffer control registers (head, tail, etc.)?
49 * Shouldn't we just need one set of those per engine command streamer? This is
50 * where the name "Logical Rings" starts to make sense: by virtualizing the
51 * rings, the engine cs shifts to a new "ring buffer" with every context
52 * switch. When you want to submit a workload to the GPU you: A) choose your
53 * context, B) find its appropriate virtualized ring, C) write commands to it
54 * and then, finally, D) tell the GPU to switch to that context.
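 *
 * As an illustrative sketch (hypothetical helper names, not the driver's
 * actual entry points), those four steps map roughly to:
 *
 *	ctx  = lookup_context(file_priv, ctx_handle);	<- A
 *	ring = ctx->engine[engine_id].ringbuf;		<- B
 *	write_commands(ring, batch);			<- C
 *	submit_execlist(engine, ctx);			<- D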
56 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
57 * to a context is via a context execution list, ergo "Execlists".
60 * Regarding the creation of contexts, we have:
62 * - One global default context.
63 * - One local default context for each opened fd.
64 * - One local extra context for each context create ioctl call.
66 * Now that ringbuffers belong to the context (and not to the engine, like
67 * before) and that contexts are uniquely tied to a given engine (and not
68 * reusable, like before), we need:
70 * - One ringbuffer per-engine inside each context.
71 * - One backing object per-engine inside each context.
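 *
 * In the driver this per-engine state is kept (roughly, with details
 * elided) in a block inside struct intel_context:
 *
 *	struct {
 *		struct drm_i915_gem_object *state;	<- backing object
 *		struct intel_ringbuffer *ringbuf;	<- logical ring
 *		int pin_count;
 *	} engine[I915_NUM_ENGINES];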
73 * The global default context starts its life with these new objects fully
74 * allocated and populated. The local default context for each opened fd is
75 * more complex, because we don't know at creation time which engine is going
76 * to use them. To handle this, we have implemented a deferred creation of LR contexts.
79 * The local context starts its life as a hollow or blank holder that only
80 * gets populated for a given engine once we receive an execbuffer. If later
81 * on we receive another execbuffer ioctl for the same context but a different
82 * engine, we allocate/populate a new ringbuffer and context backing object, and so on.
85 * Finally, regarding local contexts created using the ioctl call: as they are
86 * only allowed with the render ring, we can allocate & populate them right
87 * away (no need to defer anything, at least for now).
89 * Execlists implementation:
90 * Execlists are the new method by which, on gen8+ hardware, workloads are
91 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
92 * This method works as follows:
94 * When a request is committed, its commands (the BB start and any leading or
95 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
96 * for the appropriate context. The tail pointer in the hardware context is not
97 * updated at this time, but instead, kept by the driver in the ringbuffer
98 * structure. A structure representing this request is added to a request queue
99 * for the appropriate engine: this structure contains a copy of the context's
100 * tail after the request was written to the ring buffer and a pointer to the context itself.
103 * If the engine's request queue was empty before the request was added, the
104 * queue is processed immediately. Otherwise the queue will be processed during
105 * a context switch interrupt. In any case, elements on the queue will get sent
106 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
107 * globally unique 20-bit submission ID.
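 *
 * Conceptually (an illustrative simplification of execlists_elsp_write()
 * below), submitting such a pair is just two descriptor writes to the
 * port, second element first:
 *
 *	write_elsp(engine, ctx_descriptor(rq1));	<- element 1, may be NULL
 *	write_elsp(engine, ctx_descriptor(rq0));	<- element 0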
109 * When execution of a request completes, the GPU updates the context status
110 * buffer with a context complete event and generates a context switch interrupt.
111 * During the interrupt handling, the driver examines the events in the buffer:
112 * for each context complete event, if the announced ID matches that on the head
113 * of the request queue, then that request is retired and removed from the queue.
115 * After processing, if any requests were retired and the queue is not empty
116 * then a new execution list can be submitted. The two requests at the front of
117 * the queue are next to be submitted but since a context may not occur twice in
118 * an execution list, if subsequent requests have the same ID as the first then
119 * the two requests must be combined. This is done simply by discarding requests
120 * at the head of the queue until either only one request is left (in which case
121 * we use a NULL second context) or the first two requests have unique IDs.
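 *
 * A simplified sketch of that merge step (see execlists_context_unqueue()
 * below for the real implementation):
 *
 *	req0 = first request in the queue;
 *	for each subsequent request req:
 *		if (req->ctx == req0->ctx)
 *			retire req0, then req0 = req;	<- same ID: merge
 *		else
 *			req1 = req, done;		<- unique IDs: submit both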
123 * By always executing the first two requests in the queue the driver ensures
124 * that the GPU is kept as busy as possible. In the case where a single context
125 * completes but a second context is still executing, the request for this second
126 * context will be at the head of the queue when we remove the first one. This
127 * request will then be resubmitted along with a new request for a different context,
128 * which will cause the hardware to continue executing the second request and queue
129 * the new request (the GPU detects the condition of a context getting preempted
130 * with the same context and optimizes the context switch flow by not doing
131 * preemption, but just sampling the new tail pointer).
135 #include <drm/drmP.h>
136 #include <drm/i915_drm.h>
137 #include "i915_drv.h"
138 #include "intel_mocs.h"
140 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
141 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
142 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
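
/*
 * Illustrative helper (an assumption for this sketch, not part of the
 * original file): the backing-object size for an engine's logical ring
 * context would be picked from the constants above like this.
 */
static inline u32 sketch_lr_context_size(struct intel_engine_cs *engine)
{
	if (engine->id != RCS)
		return GEN8_LR_CONTEXT_OTHER_SIZE;

	/* The render context image grew on gen9. */
	return INTEL_INFO(engine->dev)->gen >= 9 ?
		GEN9_LR_CONTEXT_RENDER_SIZE : GEN8_LR_CONTEXT_RENDER_SIZE;
}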
144 #define RING_EXECLIST_QFULL (1 << 0x2)
145 #define RING_EXECLIST1_VALID (1 << 0x3)
146 #define RING_EXECLIST0_VALID (1 << 0x4)
147 #define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
148 #define RING_EXECLIST1_ACTIVE (1 << 0x11)
149 #define RING_EXECLIST0_ACTIVE (1 << 0x12)
151 #define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
152 #define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
153 #define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
154 #define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
155 #define GEN8_CTX_STATUS_COMPLETE (1 << 4)
156 #define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
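
/*
 * Illustrative helper (a sketch, not from the original file): this is how
 * the status bits above are combined when deciding whether a CSB event
 * retires the request at the head of the execlist queue (compare with
 * intel_lrc_irq_handler() below).
 */
static inline bool sketch_csb_retires_context(u32 csb_status)
{
	/* An element switch or a transition to idle ends the current context. */
	return csb_status & (GEN8_CTX_STATUS_ACTIVE_IDLE |
			     GEN8_CTX_STATUS_ELEMENT_SWITCH);
}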
158 #define CTX_LRI_HEADER_0 0x01
159 #define CTX_CONTEXT_CONTROL 0x02
160 #define CTX_RING_HEAD 0x04
161 #define CTX_RING_TAIL 0x06
162 #define CTX_RING_BUFFER_START 0x08
163 #define CTX_RING_BUFFER_CONTROL 0x0a
164 #define CTX_BB_HEAD_U 0x0c
165 #define CTX_BB_HEAD_L 0x0e
166 #define CTX_BB_STATE 0x10
167 #define CTX_SECOND_BB_HEAD_U 0x12
168 #define CTX_SECOND_BB_HEAD_L 0x14
169 #define CTX_SECOND_BB_STATE 0x16
170 #define CTX_BB_PER_CTX_PTR 0x18
171 #define CTX_RCS_INDIRECT_CTX 0x1a
172 #define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
173 #define CTX_LRI_HEADER_1 0x21
174 #define CTX_CTX_TIMESTAMP 0x22
175 #define CTX_PDP3_UDW 0x24
176 #define CTX_PDP3_LDW 0x26
177 #define CTX_PDP2_UDW 0x28
178 #define CTX_PDP2_LDW 0x2a
179 #define CTX_PDP1_UDW 0x2c
180 #define CTX_PDP1_LDW 0x2e
181 #define CTX_PDP0_UDW 0x30
182 #define CTX_PDP0_LDW 0x32
183 #define CTX_LRI_HEADER_2 0x41
184 #define CTX_R_PWR_CLK_STATE 0x42
185 #define CTX_GPGPU_CSR_BASE_ADDRESS 0x44
187 #define GEN8_CTX_VALID (1<<0)
188 #define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
189 #define GEN8_CTX_FORCE_RESTORE (1<<2)
190 #define GEN8_CTX_L3LLC_COHERENT (1<<5)
191 #define GEN8_CTX_PRIVILEGE (1<<8)
193 #define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
194 (reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
195 (reg_state)[(pos)+1] = (val); \
198 #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
199 const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
200 reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
201 reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
204 #define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
205 reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
206 reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
210 ADVANCED_CONTEXT = 0,
215 #define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
216 #define GEN8_CTX_ADDRESSING_MODE(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
217 LEGACY_64B_CONTEXT :\
221 FAULT_AND_HALT, /* Debug only */
223 FAULT_AND_CONTINUE /* Unsupported */
225 #define GEN8_CTX_ID_SHIFT 32
226 #define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
227 #define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
229 static int intel_lr_context_pin(struct intel_context *ctx,
230 struct intel_engine_cs *engine);
231 static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
232 struct drm_i915_gem_object *default_ctx_obj);
236 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
238 * @enable_execlists: value of i915.enable_execlists module parameter.
240 * Only certain platforms support Execlists (the prerequisites being
241 * support for Logical Ring Contexts and Aliasing PPGTT or better).
243 * Return: 1 if Execlists is supported and has to be enabled.
245 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
247 WARN_ON(i915.enable_ppgtt == -1);
249 /* On platforms where execlists are available, vGPU will only
250 * support execlist mode, not ring buffer mode.
252 if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
255 if (INTEL_INFO(dev)->gen >= 9)
258 if (enable_execlists == 0)
261 if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
262 i915.use_mmio_flip >= 0)
269 logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
271 struct drm_device *dev = engine->dev;
273 if (IS_GEN8(dev) || IS_GEN9(dev))
274 engine->idle_lite_restore_wa = ~0;
276 engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
277 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
278 (engine->id == VCS || engine->id == VCS2);
280 engine->ctx_desc_template = GEN8_CTX_VALID;
281 engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
282 GEN8_CTX_ADDRESSING_MODE_SHIFT;
284 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
285 engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
287 /* TODO: WaDisableLiteRestore when we start using semaphore
288 * signalling between Command Streamers */
289 /* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */
291 /* WaEnableForceRestoreInCtxtDescForVCS:skl */
292 /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
293 if (engine->disable_lite_restore_wa)
294 engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
298 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
299 * for a pinned context
301 * @ctx: Context to work on
302 * @engine: Engine the descriptor will be used with
304 * The context descriptor encodes various attributes of a context,
305 * including its GTT address and some flags. Because it's fairly
306 * expensive to calculate, we'll just do it once and cache the result,
307 * which remains valid until the context is unpinned.
309 * This is what a descriptor looks like, from LSB to MSB:
310 * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
311 * bits 12-31: LRCA, GTT address of (the HWSP of) this context
312 * bits 32-51: ctx ID, a globally unique tag (the LRCA again!)
313 * bits 52-63: reserved, may encode the engine ID (for GuC)
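 *
 * Worked example (illustrative numbers): for a context whose PPHWSP page
 * sits at GGTT address 0x00012000, with PAGE_SHIFT == 12 this gives
 *
 *	desc = engine->ctx_desc_template	<- bits 0-11
 *	     | 0x00012000			<- bits 12-31
 *	     | (u64)(0x00012000 >> 12) << 32;	<- bits 32-51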
316 intel_lr_context_descriptor_update(struct intel_context *ctx,
317 struct intel_engine_cs *engine)
321 lrca = ctx->engine[engine->id].lrc_vma->node.start +
322 LRC_PPHWSP_PN * PAGE_SIZE;
324 desc = engine->ctx_desc_template; /* bits 0-11 */
325 desc |= lrca; /* bits 12-31 */
326 desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
328 ctx->engine[engine->id].lrc_desc = desc;
331 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
332 struct intel_engine_cs *engine)
334 return ctx->engine[engine->id].lrc_desc;
338 * intel_execlists_ctx_id() - get the Execlists Context ID
339 * @ctx: Context to get the ID for
340 * @engine: Engine to get the ID for
342 * Do not confuse this with ctx->id! Unfortunately we have a name overload
343 * here: the old context ID we pass to userspace as a handle so that
344 * userspace can refer to a context, and the new context ID we pass to the
345 * ELSP so that the GPU can inform us of the context status via interrupts.
348 * The context ID is a portion of the context descriptor, so we can
349 * just extract the required part from the cached descriptor.
351 * Return: 20-bit globally unique context ID.
353 u32 intel_execlists_ctx_id(struct intel_context *ctx,
354 struct intel_engine_cs *engine)
356 return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
359 static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
360 struct drm_i915_gem_request *rq1)
363 struct intel_engine_cs *engine = rq0->engine;
364 struct drm_device *dev = engine->dev;
365 struct drm_i915_private *dev_priv = dev->dev_private;
369 desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
370 rq1->elsp_submitted++;
375 desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
376 rq0->elsp_submitted++;
378 /* You must always write both descriptors in the order below. */
379 I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
380 I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
382 I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
383 /* The context is automatically loaded after the following */
384 I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
386 /* ELSP is a write-only register, use another nearby reg for posting */
387 POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
391 execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
393 ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
394 ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
395 ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
396 ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
399 static void execlists_update_context(struct drm_i915_gem_request *rq)
401 struct intel_engine_cs *engine = rq->engine;
402 struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
403 uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
405 reg_state[CTX_RING_TAIL+1] = rq->tail;
407 /* True 32b PPGTT with dynamic page allocation: update PDP
408 * registers and point the unallocated PDPs to the scratch page.
409 * PML4 is allocated during ppgtt init, so this is not needed in 48-bit mode.
412 if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
413 execlists_update_context_pdps(ppgtt, reg_state);
416 static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
417 struct drm_i915_gem_request *rq1)
419 execlists_update_context(rq0);
422 execlists_update_context(rq1);
424 execlists_elsp_write(rq0, rq1);
427 static void execlists_context_unqueue__locked(struct intel_engine_cs *engine)
429 struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
430 struct drm_i915_gem_request *cursor, *tmp;
432 assert_spin_locked(&engine->execlist_lock);
435 * If irqs are not active, generate a warning as batches that finish
436 * without the irqs may get lost and a GPU hang may occur.
438 WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
440 /* Try to read in pairs */
441 list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
445 } else if (req0->ctx == cursor->ctx) {
446 /* Same ctx: ignore first request, as second request
447 * will update tail past first request's workload */
448 cursor->elsp_submitted = req0->elsp_submitted;
449 list_move_tail(&req0->execlist_link,
450 &engine->execlist_retired_req_list);
454 WARN_ON(req1->elsp_submitted);
462 if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
464 * WaIdleLiteRestore: make sure we never cause a lite restore with HEAD==TAIL.
467 * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL as we
468 * resubmit the request. See gen8_emit_request() for where we
469 * prepare the padding after the end of the request.
471 struct intel_ringbuffer *ringbuf;
473 ringbuf = req0->ctx->engine[engine->id].ringbuf;
475 req0->tail &= ringbuf->size - 1;
478 execlists_submit_requests(req0, req1);
481 static void execlists_context_unqueue(struct intel_engine_cs *engine)
483 struct drm_i915_private *dev_priv = engine->dev->dev_private;
485 spin_lock(&dev_priv->uncore.lock);
486 intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
488 execlists_context_unqueue__locked(engine);
490 intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
491 spin_unlock(&dev_priv->uncore.lock);
495 execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
497 struct drm_i915_gem_request *head_req;
499 assert_spin_locked(&engine->execlist_lock);
501 head_req = list_first_entry_or_null(&engine->execlist_queue,
502 struct drm_i915_gem_request,
508 if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id))
511 WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
513 if (--head_req->elsp_submitted > 0)
516 list_move_tail(&head_req->execlist_link,
517 &engine->execlist_retired_req_list);
523 get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
526 struct drm_i915_private *dev_priv = engine->dev->dev_private;
529 read_pointer %= GEN8_CSB_ENTRIES;
531 status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));
533 if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
536 *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
543 * intel_lrc_irq_handler() - handle Context Switch interrupts
544 * @engine: Engine Command Streamer to handle.
546 * Check the unread Context Status Buffers and manage the submission of new
547 * contexts to the ELSP accordingly.
549 void intel_lrc_irq_handler(struct intel_engine_cs *engine)
551 struct drm_i915_private *dev_priv = engine->dev->dev_private;
553 unsigned int read_pointer, write_pointer;
556 unsigned int submit_contexts = 0;
558 spin_lock(&engine->execlist_lock);
560 spin_lock(&dev_priv->uncore.lock);
561 intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
563 status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
565 read_pointer = engine->next_context_status_buffer;
566 write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
567 if (read_pointer > write_pointer)
568 write_pointer += GEN8_CSB_ENTRIES;
570 while (read_pointer < write_pointer) {
571 status = get_context_status(engine, ++read_pointer,
574 if (unlikely(status & GEN8_CTX_STATUS_PREEMPTED)) {
575 if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
576 if (execlists_check_remove_request(engine, status_id))
577 WARN(1, "Lite Restored request removed from queue\n");
579 WARN(1, "Preemption without Lite Restore\n");
582 if (status & (GEN8_CTX_STATUS_ACTIVE_IDLE |
583 GEN8_CTX_STATUS_ELEMENT_SWITCH))
585 execlists_check_remove_request(engine,
589 if (submit_contexts) {
590 if (!engine->disable_lite_restore_wa ||
591 (status & GEN8_CTX_STATUS_ACTIVE_IDLE))
592 execlists_context_unqueue__locked(engine);
595 engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
597 /* Update the read pointer to the old write pointer. Manual ringbuffer
598 * management ftw </sarcasm> */
599 I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
600 _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
601 engine->next_context_status_buffer << 8));
603 intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
604 spin_unlock(&dev_priv->uncore.lock);
606 spin_unlock(&engine->execlist_lock);
608 if (unlikely(submit_contexts > 2))
609 DRM_ERROR("More than two context complete events?\n");
612 static void execlists_context_queue(struct drm_i915_gem_request *request)
614 struct intel_engine_cs *engine = request->engine;
615 struct drm_i915_gem_request *cursor;
616 int num_elements = 0;
618 if (request->ctx != request->i915->kernel_context)
619 intel_lr_context_pin(request->ctx, engine);
621 i915_gem_request_reference(request);
623 spin_lock_irq(&engine->execlist_lock);
625 list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
626 if (++num_elements > 2)
629 if (num_elements > 2) {
630 struct drm_i915_gem_request *tail_req;
632 tail_req = list_last_entry(&engine->execlist_queue,
633 struct drm_i915_gem_request,
636 if (request->ctx == tail_req->ctx) {
637 WARN(tail_req->elsp_submitted != 0,
638 "More than 2 already-submitted reqs queued\n");
639 list_move_tail(&tail_req->execlist_link,
640 &engine->execlist_retired_req_list);
644 list_add_tail(&request->execlist_link, &engine->execlist_queue);
645 if (num_elements == 0)
646 execlists_context_unqueue(engine);
648 spin_unlock_irq(&engine->execlist_lock);
651 static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
653 struct intel_engine_cs *engine = req->engine;
654 uint32_t flush_domains;
658 if (engine->gpu_caches_dirty)
659 flush_domains = I915_GEM_GPU_DOMAINS;
661 ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
665 engine->gpu_caches_dirty = false;
669 static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
670 struct list_head *vmas)
672 const unsigned other_rings = ~intel_engine_flag(req->engine);
673 struct i915_vma *vma;
674 uint32_t flush_domains = 0;
675 bool flush_chipset = false;
678 list_for_each_entry(vma, vmas, exec_list) {
679 struct drm_i915_gem_object *obj = vma->obj;
681 if (obj->active & other_rings) {
682 ret = i915_gem_object_sync(obj, req->engine, &req);
687 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
688 flush_chipset |= i915_gem_clflush_object(obj, false);
690 flush_domains |= obj->base.write_domain;
693 if (flush_domains & I915_GEM_DOMAIN_GTT)
696 /* Unconditionally invalidate gpu caches and ensure that we do flush
697 * any residual writes from the previous batch.
699 return logical_ring_invalidate_all_caches(req);
702 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
706 request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
708 if (i915.enable_guc_submission) {
710 * Check that the GuC has space for the request before
711 * going any further, as the i915_add_request() call
712 * later on mustn't fail ...
714 struct intel_guc *guc = &request->i915->guc;
716 ret = i915_guc_wq_check_space(guc->execbuf_client);
721 if (request->ctx != request->i915->kernel_context)
722 ret = intel_lr_context_pin(request->ctx, request->engine);
727 static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
730 struct intel_ringbuffer *ringbuf = req->ringbuf;
731 struct intel_engine_cs *engine = req->engine;
732 struct drm_i915_gem_request *target;
736 if (intel_ring_space(ringbuf) >= bytes)
739 /* The whole point of reserving space is to not wait! */
740 WARN_ON(ringbuf->reserved_in_use);
742 list_for_each_entry(target, &engine->request_list, list) {
744 * The request queue is per-engine, so can contain requests
745 * from multiple ringbuffers. Here, we must ignore any that
746 * aren't from the ringbuffer we're considering.
748 if (target->ringbuf != ringbuf)
751 /* Would completion of this request free enough space? */
752 space = __intel_ring_space(target->postfix, ringbuf->tail,
758 if (WARN_ON(&target->list == &engine->request_list))
761 ret = i915_wait_request(target);
765 ringbuf->space = space;
770 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
771 * @request: Request to advance the logical ringbuffer of.
773 * The tail is updated in our logical ringbuffer struct, not in the actual context. What
774 * really happens during submission is that the context and current tail will be placed
775 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
776 * point, the tail *inside* the context is updated and the ELSP written to.
779 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
781 struct intel_ringbuffer *ringbuf = request->ringbuf;
782 struct drm_i915_private *dev_priv = request->i915;
783 struct intel_engine_cs *engine = request->engine;
785 intel_logical_ring_advance(ringbuf);
786 request->tail = ringbuf->tail;
789 * Here we add two extra NOOPs as padding to avoid
790 * lite restore of a context with HEAD==TAIL.
792 * Caller must reserve WA_TAIL_DWORDS for us!
794 intel_logical_ring_emit(ringbuf, MI_NOOP);
795 intel_logical_ring_emit(ringbuf, MI_NOOP);
796 intel_logical_ring_advance(ringbuf);
798 if (intel_ring_stopped(engine))
801 if (engine->last_context != request->ctx) {
802 if (engine->last_context)
803 intel_lr_context_unpin(engine->last_context, engine);
804 if (request->ctx != request->i915->kernel_context) {
805 intel_lr_context_pin(request->ctx, engine);
806 engine->last_context = request->ctx;
808 engine->last_context = NULL;
812 if (dev_priv->guc.execbuf_client)
813 i915_guc_submit(dev_priv->guc.execbuf_client, request);
815 execlists_context_queue(request);
820 static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
822 uint32_t __iomem *virt;
823 int rem = ringbuf->size - ringbuf->tail;
825 virt = ringbuf->virtual_start + ringbuf->tail;
828 iowrite32(MI_NOOP, virt++);
831 intel_ring_update_space(ringbuf);
834 static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
836 struct intel_ringbuffer *ringbuf = req->ringbuf;
837 int remain_usable = ringbuf->effective_size - ringbuf->tail;
838 int remain_actual = ringbuf->size - ringbuf->tail;
839 int ret, total_bytes, wait_bytes = 0;
840 bool need_wrap = false;
842 if (ringbuf->reserved_in_use)
845 total_bytes = bytes + ringbuf->reserved_size;
847 if (unlikely(bytes > remain_usable)) {
849 * Not enough space for the basic request. So we need to flush
850 * out the remainder and then wait for base + reserved.
852 wait_bytes = remain_actual + total_bytes;
855 if (unlikely(total_bytes > remain_usable)) {
857 * The base request will fit but the reserved space
858 * falls off the end. So we only need to wait for the
859 * reserved size after flushing out the remainder.
861 wait_bytes = remain_actual + ringbuf->reserved_size;
863 } else if (total_bytes > ringbuf->space) {
864 /* No wrapping required, just waiting. */
865 wait_bytes = total_bytes;
870 ret = logical_ring_wait_for_space(req, wait_bytes);
875 __wrap_ring_buffer(ringbuf);
882 * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
884 * @req: The request to start some new work for
885 * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
887 * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
888 * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
889 * and also preallocates a request (every workload submission is still mediated through
890 * requests, same as it did with legacy ringbuffer submission).
892 * Return: non-zero if the ringbuffer is not ready to be written to.
894 int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
896 struct drm_i915_private *dev_priv;
899 WARN_ON(req == NULL);
900 dev_priv = req->engine->dev->dev_private;
902 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
903 dev_priv->mm.interruptible);
907 ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
911 req->ringbuf->space -= num_dwords * sizeof(uint32_t);
915 int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
918 * The first call merely notes the reserve request and is common for
919 * all back ends. The subsequent localised _begin() call actually
920 * ensures that the reservation is available. Without the begin, if
921 * the request creator immediately submitted the request without
922 * adding any commands to it then there might not actually be
923 * sufficient room for the submission commands.
925 intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
927 return intel_logical_ring_begin(request, 0);
931 * execlists_submission() - submit a batchbuffer for execution, Execlists style
934 * @params: execbuffer call parameters (includes the engine, context,
935 *          batch object, batch start offset and dispatch flags).
936 * @args: execbuffer call arguments.
937 * @vmas: list of vmas.
942 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
943 * away the submission details of the execbuffer ioctl call.
945 * Return: non-zero if the submission fails.
947 int intel_execlists_submission(struct i915_execbuffer_params *params,
948 struct drm_i915_gem_execbuffer2 *args,
949 struct list_head *vmas)
951 struct drm_device *dev = params->dev;
952 struct intel_engine_cs *engine = params->engine;
953 struct drm_i915_private *dev_priv = dev->dev_private;
954 struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
960 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
961 instp_mask = I915_EXEC_CONSTANTS_MASK;
962 switch (instp_mode) {
963 case I915_EXEC_CONSTANTS_REL_GENERAL:
964 case I915_EXEC_CONSTANTS_ABSOLUTE:
965 case I915_EXEC_CONSTANTS_REL_SURFACE:
966 if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
967 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
971 if (instp_mode != dev_priv->relative_constants_mode) {
972 if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
973 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
977 /* The HW changed the meaning of this bit on gen6 */
978 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
982 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
986 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
987 DRM_DEBUG("sol reset is gen7 only\n");
991 ret = execlists_move_to_gpu(params->request, vmas);
995 if (engine == &dev_priv->engine[RCS] &&
996 instp_mode != dev_priv->relative_constants_mode) {
997 ret = intel_logical_ring_begin(params->request, 4);
1001 intel_logical_ring_emit(ringbuf, MI_NOOP);
1002 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
1003 intel_logical_ring_emit_reg(ringbuf, INSTPM);
1004 intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
1005 intel_logical_ring_advance(ringbuf);
1007 dev_priv->relative_constants_mode = instp_mode;
1010 exec_start = params->batch_obj_vm_offset +
1011 args->batch_start_offset;
1013 ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
1017 trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
1019 i915_gem_execbuffer_move_to_active(vmas, params->request);
1020 i915_gem_execbuffer_retire_commands(params);
1025 void intel_execlists_retire_requests(struct intel_engine_cs *engine)
1027 struct drm_i915_gem_request *req, *tmp;
1028 struct list_head retired_list;
1030 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
1031 if (list_empty(&engine->execlist_retired_req_list))
1034 INIT_LIST_HEAD(&retired_list);
1035 spin_lock_irq(&engine->execlist_lock);
1036 list_replace_init(&engine->execlist_retired_req_list, &retired_list);
1037 spin_unlock_irq(&engine->execlist_lock);
1039 list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
1040 struct intel_context *ctx = req->ctx;
1041 struct drm_i915_gem_object *ctx_obj =
1042 ctx->engine[engine->id].state;
1044 if (ctx_obj && (ctx != req->i915->kernel_context))
1045 intel_lr_context_unpin(ctx, engine);
1047 list_del(&req->execlist_link);
1048 i915_gem_request_unreference(req);
1052 void intel_logical_ring_stop(struct intel_engine_cs *engine)
1054 struct drm_i915_private *dev_priv = engine->dev->dev_private;
1057 if (!intel_ring_initialized(engine))
1060 ret = intel_engine_idle(engine);
1061 if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
1062 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1065 /* TODO: Is this correct with Execlists enabled? */
1066 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
1067 if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
1068 DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
1071 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
1074 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
1076 struct intel_engine_cs *engine = req->engine;
1079 if (!engine->gpu_caches_dirty)
1082 ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
1086 engine->gpu_caches_dirty = false;
1090 static int intel_lr_context_do_pin(struct intel_context *ctx,
1091 struct intel_engine_cs *engine)
1093 struct drm_device *dev = engine->dev;
1094 struct drm_i915_private *dev_priv = dev->dev_private;
1095 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
1096 struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
1097 struct page *lrc_state_page;
1098 uint32_t *lrc_reg_state;
1101 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
1103 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
1104 PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
1108 lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
1109 if (WARN_ON(!lrc_state_page)) {
1114 ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
1118 ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
1119 intel_lr_context_descriptor_update(ctx, engine);
1120 lrc_reg_state = kmap(lrc_state_page);
1121 lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
1122 ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
1123 ctx_obj->dirty = true;
1125 /* Invalidate GuC TLB. */
1126 if (i915.enable_guc_submission)
1127 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
1132 i915_gem_object_ggtt_unpin(ctx_obj);
1137 static int intel_lr_context_pin(struct intel_context *ctx,
1138 struct intel_engine_cs *engine)
1142 if (ctx->engine[engine->id].pin_count++ == 0) {
1143 ret = intel_lr_context_do_pin(ctx, engine);
1145 goto reset_pin_count;
1147 i915_gem_context_reference(ctx);
1152 ctx->engine[engine->id].pin_count = 0;
1156 void intel_lr_context_unpin(struct intel_context *ctx,
1157 struct intel_engine_cs *engine)
1159 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
1161 WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
1162 if (--ctx->engine[engine->id].pin_count == 0) {
1163 kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
1164 intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
1165 i915_gem_object_ggtt_unpin(ctx_obj);
1166 ctx->engine[engine->id].lrc_vma = NULL;
1167 ctx->engine[engine->id].lrc_desc = 0;
1168 ctx->engine[engine->id].lrc_reg_state = NULL;
1170 i915_gem_context_unreference(ctx);
1174 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
1177 struct intel_engine_cs *engine = req->engine;
1178 struct intel_ringbuffer *ringbuf = req->ringbuf;
1179 struct drm_device *dev = engine->dev;
1180 struct drm_i915_private *dev_priv = dev->dev_private;
1181 struct i915_workarounds *w = &dev_priv->workarounds;
1186 engine->gpu_caches_dirty = true;
1187 ret = logical_ring_flush_all_caches(req);
1191 ret = intel_logical_ring_begin(req, w->count * 2 + 2);
1195 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
1196 for (i = 0; i < w->count; i++) {
1197 intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
1198 intel_logical_ring_emit(ringbuf, w->reg[i].value);
1200 intel_logical_ring_emit(ringbuf, MI_NOOP);
1202 intel_logical_ring_advance(ringbuf);
1204 engine->gpu_caches_dirty = true;
1205 ret = logical_ring_flush_all_caches(req);
1212 #define wa_ctx_emit(batch, index, cmd) \
1214 int __index = (index)++; \
1215 if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
1218 batch[__index] = (cmd); \
1221 #define wa_ctx_emit_reg(batch, index, reg) \
1222 wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
1225 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after the
1226 * PIPE_CONTROL instruction. This is required for the flush to happen correctly
1227 * but there is a slight complication as this is applied in a WA batch where the
1228 * values are only initialized once so we cannot take the register value at the
1229 * beginning and reuse it further; hence we save its value to memory, upload a
1230 * constant value with bit 21 set and then we restore it back with the saved value.
1231 * To simplify the WA, a constant value is formed by using the default value
1232 * of this register. This shouldn't be a problem because we are only modifying
1233 * it for a short period and this batch is non-preemptible. We can of course
1234 * use additional instructions that read the actual value of the register
1235 * at that time and set our bit of interest but it makes the WA complicated.
1237 * This WA is also required for Gen9 so extracting it as a function avoids code duplication.
1240 static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
1241 uint32_t *const batch,
1244 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
1247 * WaDisableLSQCROPERFforOCL:skl
1248 * This WA is implemented in skl_init_clock_gating() but since
1249 * this batch updates GEN8_L3SQCREG4 with default value we need to
1250 * set this bit here to retain the WA during flush.
1252 if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
1253 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
1255 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
1256 MI_SRM_LRM_GLOBAL_GTT));
1257 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1258 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
1259 wa_ctx_emit(batch, index, 0);
1261 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1262 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1263 wa_ctx_emit(batch, index, l3sqc4_flush);
1265 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1266 wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
1267 PIPE_CONTROL_DC_FLUSH_ENABLE));
1268 wa_ctx_emit(batch, index, 0);
1269 wa_ctx_emit(batch, index, 0);
1270 wa_ctx_emit(batch, index, 0);
1271 wa_ctx_emit(batch, index, 0);
1273 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
1274 MI_SRM_LRM_GLOBAL_GTT));
1275 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1276 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
1277 wa_ctx_emit(batch, index, 0);
1282 static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
1284 uint32_t start_alignment)
1286 return wa_ctx->offset = ALIGN(offset, start_alignment);
1289 static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
1291 uint32_t size_alignment)
1293 wa_ctx->size = offset - wa_ctx->offset;
1295 WARN(wa_ctx->size % size_alignment,
1296 "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
1297 wa_ctx->size, size_alignment);
1302 * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
1304 * @engine: only applicable for RCS
1305 * @wa_ctx: structure representing wa_ctx
1306 *   offset: specifies the start of the batch, should be cache-aligned. This is updated
1307 *   with the offset value received as input.
1308 *   size: size of the batch in DWORDS but HW expects in terms of cachelines
1309 * @batch: page in which WA are loaded
1310 * @offset: This field specifies the start of the batch, it should be
1311 * cache-aligned otherwise it is adjusted accordingly.
1312 * Typically we only have one indirect_ctx and per_ctx batch buffer which are
1313 * initialized at the beginning and shared across all contexts but this field
1314 * helps us to have multiple batches at different offsets and select them based
1315 * on some criteria. At the moment this batch always starts at the beginning of the page
1316 * and at this point we don't have multiple wa_ctx batch buffers.
1318 * The number of WAs applied is not known at the beginning; we use this field
1319 * to return the number of DWORDS written.
1321 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
1322 * so it adds NOOPs as padding to make it cacheline aligned.
1323 * MI_BATCH_BUFFER_END will be added to the perctx batch and both of them together
1324 * make a complete batch buffer.
1326 * Return: non-zero if we exceed the PAGE_SIZE limit.
1329 static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
1330 struct i915_wa_ctx_bb *wa_ctx,
1331 uint32_t *const batch,
1334 uint32_t scratch_addr;
1335 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1337 /* WaDisableCtxRestoreArbitration:bdw,chv */
1338 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1340 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
1341 if (IS_BROADWELL(engine->dev)) {
1342 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1348 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
1349 /* Actual scratch location is at 128 bytes offset */
1350 scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
1352 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1353 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1354 PIPE_CONTROL_GLOBAL_GTT_IVB |
1355 PIPE_CONTROL_CS_STALL |
1356 PIPE_CONTROL_QW_WRITE));
1357 wa_ctx_emit(batch, index, scratch_addr);
1358 wa_ctx_emit(batch, index, 0);
1359 wa_ctx_emit(batch, index, 0);
1360 wa_ctx_emit(batch, index, 0);
1362 /* Pad to end of cacheline */
1363 while (index % CACHELINE_DWORDS)
1364 wa_ctx_emit(batch, index, MI_NOOP);
1367 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
1368 * execution depends on the length specified in terms of cache lines
1369 * in the register CTX_RCS_INDIRECT_CTX
1372 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1376 * gen8_init_perctx_bb() - initialize per ctx batch with WA
1378 * @engine: only applicable for RCS
1379 * @wa_ctx: structure representing wa_ctx
1380 *   offset: specifies the start of the batch, should be cache-aligned.
1381 *   size: size of the batch in DWORDS but HW expects in terms of cachelines
1382 * @batch: page in which WA are loaded
1383 * @offset: This field specifies the start of this batch.
1384 * This batch is started immediately after the indirect_ctx batch. Since we ensure
1385 * that indirect_ctx ends on a cacheline this batch is aligned automatically.
1387 * The number of DWORDS written is returned using this field.
1389 * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
1390 * to align it to a cacheline, as padding after MI_BATCH_BUFFER_END is redundant.
1392 static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
1393 struct i915_wa_ctx_bb *wa_ctx,
1394 uint32_t *const batch,
1397 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1399 /* WaDisableCtxRestoreArbitration:bdw,chv */
1400 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1402 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1404 return wa_ctx_end(wa_ctx, *offset = index, 1);
1407 static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
1408 struct i915_wa_ctx_bb *wa_ctx,
1409 uint32_t *const batch,
1413 struct drm_device *dev = engine->dev;
1414 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1416 /* WaDisableCtxRestoreArbitration:skl,bxt */
1417 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
1418 IS_BXT_REVID(dev, 0, BXT_REVID_A1))
1419 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1421 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
1422 ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1427 /* Pad to end of cacheline */
1428 while (index % CACHELINE_DWORDS)
1429 wa_ctx_emit(batch, index, MI_NOOP);
1431 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1434 static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1435 struct i915_wa_ctx_bb *wa_ctx,
1436 uint32_t *const batch,
1439 struct drm_device *dev = engine->dev;
1440 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1442 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
1443 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
1444 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
1445 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1446 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
1447 wa_ctx_emit(batch, index,
1448 _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
1449 wa_ctx_emit(batch, index, MI_NOOP);
1452 /* WaDisableCtxRestoreArbitration:skl,bxt */
1453 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
1454 IS_BXT_REVID(dev, 0, BXT_REVID_A1))
1455 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1457 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1459 return wa_ctx_end(wa_ctx, *offset = index, 1);
1462 static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
1466 engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev,
1468 if (!engine->wa_ctx.obj) {
1469 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
1473 ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
1475 DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
1477 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
1484 static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
1486 if (engine->wa_ctx.obj) {
1487 i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
1488 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
1489 engine->wa_ctx.obj = NULL;
1493 static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1499 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
1501 WARN_ON(engine->id != RCS);
1503 /* update this when WA for higher Gen are added */
1504 if (INTEL_INFO(engine->dev)->gen > 9) {
1505 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
1506 INTEL_INFO(engine->dev)->gen);
1510 /* some WAs perform writes to the scratch page, ensure it is valid */
1511 if (engine->scratch.obj == NULL) {
1512 DRM_ERROR("scratch page not allocated for %s\n", engine->name);
1516 ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
1518 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1522 page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
1523 batch = kmap_atomic(page);
1526 if (INTEL_INFO(engine->dev)->gen == 8) {
1527 ret = gen8_init_indirectctx_bb(engine,
1528 &wa_ctx->indirect_ctx,
1534 ret = gen8_init_perctx_bb(engine,
1540 } else if (INTEL_INFO(engine->dev)->gen == 9) {
1541 ret = gen9_init_indirectctx_bb(engine,
1542 &wa_ctx->indirect_ctx,
1548 ret = gen9_init_perctx_bb(engine,
1557 kunmap_atomic(batch);
1559 lrc_destroy_wa_ctx_obj(engine);
1564 static int gen8_init_common_ring(struct intel_engine_cs *engine)
1566 struct drm_device *dev = engine->dev;
1567 struct drm_i915_private *dev_priv = dev->dev_private;
1568 unsigned int next_context_status_buffer_hw;
1570 lrc_setup_hardware_status_page(engine,
1571 dev_priv->kernel_context->engine[engine->id].state);
1573 I915_WRITE_IMR(engine,
1574 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1575 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
1577 I915_WRITE(RING_MODE_GEN7(engine),
1578 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1579 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1580 POSTING_READ(RING_MODE_GEN7(engine));
1583 * Instead of resetting the Context Status Buffer (CSB) read pointer to
1584 * zero, we need to read the write pointer from hardware and use its
1585 * value because "this register is power context save restored".
1586 * Effectively, these states have been observed:
1588 * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
1589 * BDW | CSB regs not reset | CSB regs reset |
1590 * CHT | CSB regs not reset | CSB regs not reset |
1594 next_context_status_buffer_hw =
1595 GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
1598 * When the CSB registers are reset (also after power-up / gpu reset),
1599 * CSB write pointer is set to all 1's, which is not valid, use '5' in
1600 * this special case, so the first element read is CSB[0].
1602 if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
1603 next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
1605 engine->next_context_status_buffer = next_context_status_buffer_hw;
1606 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
1608 memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
1613 static int gen8_init_render_ring(struct intel_engine_cs *engine)
1615 struct drm_device *dev = engine->dev;
1616 struct drm_i915_private *dev_priv = dev->dev_private;
1619 ret = gen8_init_common_ring(engine);
1623 /* We need to disable the AsyncFlip performance optimisations in order
1624 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1625 * programmed to '1' on all products.
1627 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1629 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1631 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1633 return init_workarounds_ring(engine);
1636 static int gen9_init_render_ring(struct intel_engine_cs *engine)
1640 ret = gen8_init_common_ring(engine);
1644 return init_workarounds_ring(engine);
1647 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1649 struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
1650 struct intel_engine_cs *engine = req->engine;
1651 struct intel_ringbuffer *ringbuf = req->ringbuf;
1652 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
1655 ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
1659 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
1660 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
1661 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1663 intel_logical_ring_emit_reg(ringbuf,
1664 GEN8_RING_PDP_UDW(engine, i));
1665 intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
1666 intel_logical_ring_emit_reg(ringbuf,
1667 GEN8_RING_PDP_LDW(engine, i));
1668 intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
1671 intel_logical_ring_emit(ringbuf, MI_NOOP);
1672 intel_logical_ring_advance(ringbuf);
1677 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1678 u64 offset, unsigned dispatch_flags)
1680 struct intel_ringbuffer *ringbuf = req->ringbuf;
1681 bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
1684 /* Don't rely on hw updating PDPs, especially in lite-restore.
1685 * Ideally, we should set Force PD Restore in ctx descriptor,
1686 * but we can't. Force Restore would be a second option, but
1687 * it is unsafe in case of lite-restore (because the ctx is
1688 * not idle). PML4 is allocated during ppgtt init so this is
1689 * not needed in 48-bit mode. */
1690 if (req->ctx->ppgtt &&
1691 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
1692 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
1693 !intel_vgpu_active(req->i915->dev)) {
1694 ret = intel_logical_ring_emit_pdps(req);
1699 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
1702 ret = intel_logical_ring_begin(req, 4);
1706 /* FIXME(BDW): Address space and security selectors. */
1707 intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
1709 (dispatch_flags & I915_DISPATCH_RS ?
1710 MI_BATCH_RESOURCE_STREAMER : 0));
1711 intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
1712 intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
1713 intel_logical_ring_emit(ringbuf, MI_NOOP);
1714 intel_logical_ring_advance(ringbuf);
1719 static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
1721 struct drm_device *dev = engine->dev;
1722 struct drm_i915_private *dev_priv = dev->dev_private;
1723 unsigned long flags;
1725 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1728 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1729 if (engine->irq_refcount++ == 0) {
1730 I915_WRITE_IMR(engine,
1731 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1732 POSTING_READ(RING_IMR(engine->mmio_base));
1734 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1739 static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
1741 struct drm_device *dev = engine->dev;
1742 struct drm_i915_private *dev_priv = dev->dev_private;
1743 unsigned long flags;
1745 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1746 if (--engine->irq_refcount == 0) {
1747 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1748 POSTING_READ(RING_IMR(engine->mmio_base));
1750 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1753 static int gen8_emit_flush(struct drm_i915_gem_request *request,
1754 u32 invalidate_domains,
1757 struct intel_ringbuffer *ringbuf = request->ringbuf;
1758 struct intel_engine_cs *engine = ringbuf->engine;
1759 struct drm_device *dev = engine->dev;
1760 struct drm_i915_private *dev_priv = dev->dev_private;
1764 ret = intel_logical_ring_begin(request, 4);
1768 cmd = MI_FLUSH_DW + 1;
1770 /* We always require a command barrier so that subsequent
1771 * commands, such as breadcrumb interrupts, are strictly ordered
1772 * wrt the contents of the write cache being flushed to memory
1773 * (and thus being coherent from the CPU).
1775 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1777 if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
1778 cmd |= MI_INVALIDATE_TLB;
1779 if (engine == &dev_priv->engine[VCS])
1780 cmd |= MI_INVALIDATE_BSD;
1783 intel_logical_ring_emit(ringbuf, cmd);
1784 intel_logical_ring_emit(ringbuf,
1785 I915_GEM_HWS_SCRATCH_ADDR |
1786 MI_FLUSH_DW_USE_GTT);
1787 intel_logical_ring_emit(ringbuf, 0); /* upper addr */
1788 intel_logical_ring_emit(ringbuf, 0); /* value */
1789 intel_logical_ring_advance(ringbuf);
1794 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1795 u32 invalidate_domains,
1798 struct intel_ringbuffer *ringbuf = request->ringbuf;
1799 struct intel_engine_cs *engine = ringbuf->engine;
1800 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
1801 bool vf_flush_wa = false;
1805 flags |= PIPE_CONTROL_CS_STALL;
1807 if (flush_domains) {
1808 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1809 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
1810 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
1811 flags |= PIPE_CONTROL_FLUSH_ENABLE;
1814 if (invalidate_domains) {
1815 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1816 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1817 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1818 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1819 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1820 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1821 flags |= PIPE_CONTROL_QW_WRITE;
1822 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
1825 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL pipe control.
1828 if (IS_GEN9(engine->dev))
1832 ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
1837 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1838 intel_logical_ring_emit(ringbuf, 0);
1839 intel_logical_ring_emit(ringbuf, 0);
1840 intel_logical_ring_emit(ringbuf, 0);
1841 intel_logical_ring_emit(ringbuf, 0);
1842 intel_logical_ring_emit(ringbuf, 0);
1845 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1846 intel_logical_ring_emit(ringbuf, flags);
1847 intel_logical_ring_emit(ringbuf, scratch_addr);
1848 intel_logical_ring_emit(ringbuf, 0);
1849 intel_logical_ring_emit(ringbuf, 0);
1850 intel_logical_ring_emit(ringbuf, 0);
1851 intel_logical_ring_advance(ringbuf);
1856 static u32 gen8_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
1858 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
1861 static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1863 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1866 static u32 bxt_a_get_seqno(struct intel_engine_cs *engine,
1867 bool lazy_coherency)
1871 * On BXT A steppings there is a HW coherency issue whereby the
1872 * MI_STORE_DATA_IMM storing the completed request's seqno
1873 * occasionally doesn't invalidate the CPU cache. Work around this by
1874 * clflushing the corresponding cacheline whenever the caller wants
1875 * the coherency to be guaranteed. Note that this cacheline is known
1876 * to be clean at this point, since we only write it in
1877 * bxt_a_set_seqno(), where we also do a clflush after the write. So
1878 * this clflush in practice becomes an invalidate operation.
1881 if (!lazy_coherency)
1882 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
1884 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
1887 static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1889 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1891 /* See bxt_a_get_seqno() explaining the reason for the clflush. */
1892 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
1896 * Reserve space for 2 NOOPs at the end of each request to be
1897 * used as a workaround for not being allowed to do lite
1898 * restore with HEAD==TAIL (WaIdleLiteRestore).
1900 #define WA_TAIL_DWORDS 2
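/*
 * A minimal sketch of how this padding could be applied in the submit
 * path (assuming a helper shaped like the
 * intel_logical_ring_advance_and_submit() used by the emit_request
 * callbacks below). The request tail is sampled before the padding, so a
 * lite restore never observes HEAD == TAIL:
 *
 *	intel_logical_ring_advance(ringbuf);
 *	request->tail = ringbuf->tail;
 *	intel_logical_ring_emit(ringbuf, MI_NOOP);
 *	intel_logical_ring_emit(ringbuf, MI_NOOP);
 *	intel_logical_ring_advance(ringbuf);
 */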
1902 static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
1904 return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
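/* Both gen8_emit_request() and gen8_emit_request_render() below write the
 * request's seqno to this GGTT address; gen8_get_seqno() reads it back
 * through the status page at I915_GEM_HWS_INDEX.
 */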
1907 static int gen8_emit_request(struct drm_i915_gem_request *request)
1909 struct intel_ringbuffer *ringbuf = request->ringbuf;
1912 ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
1916 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1917 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
1919 intel_logical_ring_emit(ringbuf,
1920 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1921 intel_logical_ring_emit(ringbuf,
1922 hws_seqno_address(request->engine) |
1923 MI_FLUSH_DW_USE_GTT);
1924 intel_logical_ring_emit(ringbuf, 0);
1925 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
1926 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1927 intel_logical_ring_emit(ringbuf, MI_NOOP);
1928 return intel_logical_ring_advance_and_submit(request);
1931 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1933 struct intel_ringbuffer *ringbuf = request->ringbuf;
1936 ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
1940 /* w/a: post-sync ops following a GPGPU operation need a prior
1941 * CS_STALL, which is emitted by the flush
1942 * following the batch.
1944 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
1945 intel_logical_ring_emit(ringbuf,
1946 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1947 PIPE_CONTROL_CS_STALL |
1948 PIPE_CONTROL_QW_WRITE));
1949 intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine));
1950 intel_logical_ring_emit(ringbuf, 0);
1951 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
1952 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1953 return intel_logical_ring_advance_and_submit(request);
1956 static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
1958 struct render_state so;
1961 ret = i915_gem_render_state_prepare(req->engine, &so);
1965 if (so.rodata == NULL)
1968 ret = req->engine->emit_bb_start(req, so.ggtt_offset,
1969 I915_DISPATCH_SECURE);
1973 ret = req->engine->emit_bb_start(req,
1974 (so.ggtt_offset + so.aux_batch_offset),
1975 I915_DISPATCH_SECURE);
1979 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
1982 i915_gem_render_state_fini(&so);
1986 static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
1990 ret = intel_logical_ring_workarounds_emit(req);
1994 ret = intel_rcs_context_init_mocs(req);
1996 * Failing to program the MOCS is non-fatal. The system will not
1997 * run at peak performance. So generate an error and carry on.
2000 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
2002 return intel_lr_context_render_state_init(req);
2006 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
2008 * @engine: Engine Command Streamer.
2011 void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
2013 struct drm_i915_private *dev_priv;
2015 if (!intel_ring_initialized(engine))
2018 dev_priv = engine->dev->dev_private;
2020 if (engine->buffer) {
2021 intel_logical_ring_stop(engine);
2022 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
2025 if (engine->cleanup)
2026 engine->cleanup(engine);
2028 i915_cmd_parser_fini_ring(engine);
2029 i915_gem_batch_pool_fini(&engine->batch_pool);
2031 if (engine->status_page.obj) {
2032 kunmap(sg_page(engine->status_page.obj->pages->sgl));
2033 engine->status_page.obj = NULL;
2036 engine->idle_lite_restore_wa = 0;
2037 engine->disable_lite_restore_wa = false;
2038 engine->ctx_desc_template = 0;
2040 lrc_destroy_wa_ctx_obj(engine);
2045 logical_ring_default_vfuncs(struct drm_device *dev,
2046 struct intel_engine_cs *engine)
2048 /* Default vfuncs which can be overridden by each engine. */
2049 engine->init_hw = gen8_init_common_ring;
2050 engine->emit_request = gen8_emit_request;
2051 engine->emit_flush = gen8_emit_flush;
2052 engine->irq_get = gen8_logical_ring_get_irq;
2053 engine->irq_put = gen8_logical_ring_put_irq;
2054 engine->emit_bb_start = gen8_emit_bb_start;
2055 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
2056 engine->get_seqno = bxt_a_get_seqno;
2057 engine->set_seqno = bxt_a_set_seqno;
2059 engine->get_seqno = gen8_get_seqno;
2060 engine->set_seqno = gen8_set_seqno;
2065 logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
2067 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
2068 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
2072 logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
2074 struct intel_context *dctx = to_i915(dev)->kernel_context;
2077 /* Intentionally left blank. */
2078 engine->buffer = NULL;
2081 INIT_LIST_HEAD(&engine->active_list);
2082 INIT_LIST_HEAD(&engine->request_list);
2083 i915_gem_batch_pool_init(dev, &engine->batch_pool);
2084 init_waitqueue_head(&engine->irq_queue);
2086 INIT_LIST_HEAD(&engine->buffers);
2087 INIT_LIST_HEAD(&engine->execlist_queue);
2088 INIT_LIST_HEAD(&engine->execlist_retired_req_list);
2089 spin_lock_init(&engine->execlist_lock);
2091 logical_ring_init_platform_invariants(engine);
2093 ret = i915_cmd_parser_init_ring(engine);
2097 ret = intel_lr_context_deferred_alloc(dctx, engine);
2101 /* As this is the default context, always pin it */
2102 ret = intel_lr_context_do_pin(dctx, engine);
2105 "Failed to pin and map ringbuffer %s: %d\n",
2113 intel_logical_ring_cleanup(engine);
2117 static int logical_render_ring_init(struct drm_device *dev)
2119 struct drm_i915_private *dev_priv = dev->dev_private;
2120 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
2123 engine->name = "render ring";
2125 engine->exec_id = I915_EXEC_RENDER;
2126 engine->guc_id = GUC_RENDER_ENGINE;
2127 engine->mmio_base = RENDER_RING_BASE;
2129 logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT);
2130 if (HAS_L3_DPF(dev))
2131 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2133 logical_ring_default_vfuncs(dev, engine);
2135 /* Override some for render ring. */
2136 if (INTEL_INFO(dev)->gen >= 9)
2137 engine->init_hw = gen9_init_render_ring;
2139 engine->init_hw = gen8_init_render_ring;
2140 engine->init_context = gen8_init_rcs_context;
2141 engine->cleanup = intel_fini_pipe_control;
2142 engine->emit_flush = gen8_emit_flush_render;
2143 engine->emit_request = gen8_emit_request_render;
2147 ret = intel_init_pipe_control(engine);
2151 ret = intel_init_workaround_bb(engine);
2154 * We continue even if we fail to initialize the WA batch
2155 * because we only expect rare glitches, nothing critical
2156 * enough to prevent us from using the GPU
2158 DRM_ERROR("WA batch buffer initialization failed: %d\n",
2162 ret = logical_ring_init(dev, engine);
2164 lrc_destroy_wa_ctx_obj(engine);
2170 static int logical_bsd_ring_init(struct drm_device *dev)
2172 struct drm_i915_private *dev_priv = dev->dev_private;
2173 struct intel_engine_cs *engine = &dev_priv->engine[VCS];
2175 engine->name = "bsd ring";
2177 engine->exec_id = I915_EXEC_BSD;
2178 engine->guc_id = GUC_VIDEO_ENGINE;
2179 engine->mmio_base = GEN6_BSD_RING_BASE;
2181 logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT);
2182 logical_ring_default_vfuncs(dev, engine);
2184 return logical_ring_init(dev, engine);
2187 static int logical_bsd2_ring_init(struct drm_device *dev)
2189 struct drm_i915_private *dev_priv = dev->dev_private;
2190 struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
2192 engine->name = "bsd2 ring";
2194 engine->exec_id = I915_EXEC_BSD;
2195 engine->guc_id = GUC_VIDEO_ENGINE2;
2196 engine->mmio_base = GEN8_BSD2_RING_BASE;
2198 logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT);
2199 logical_ring_default_vfuncs(dev, engine);
2201 return logical_ring_init(dev, engine);
2204 static int logical_blt_ring_init(struct drm_device *dev)
2206 struct drm_i915_private *dev_priv = dev->dev_private;
2207 struct intel_engine_cs *engine = &dev_priv->engine[BCS];
2209 engine->name = "blitter ring";
2211 engine->exec_id = I915_EXEC_BLT;
2212 engine->guc_id = GUC_BLITTER_ENGINE;
2213 engine->mmio_base = BLT_RING_BASE;
2215 logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT);
2216 logical_ring_default_vfuncs(dev, engine);
2218 return logical_ring_init(dev, engine);
2221 static int logical_vebox_ring_init(struct drm_device *dev)
2223 struct drm_i915_private *dev_priv = dev->dev_private;
2224 struct intel_engine_cs *engine = &dev_priv->engine[VECS];
2226 engine->name = "video enhancement ring";
2228 engine->exec_id = I915_EXEC_VEBOX;
2229 engine->guc_id = GUC_VIDEOENHANCE_ENGINE;
2230 engine->mmio_base = VEBOX_RING_BASE;
2232 logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT);
2233 logical_ring_default_vfuncs(dev, engine);
2235 return logical_ring_init(dev, engine);
2239 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
2242 * This function inits the engines for an Execlists submission style (the equivalent in the
2243 * legacy ringbuffer submission world would be i915_gem_init_rings). It does so only for
2244 * those engines that are present in the hardware.
2246 * Return: non-zero if the initialization failed.
2248 int intel_logical_rings_init(struct drm_device *dev)
2250 struct drm_i915_private *dev_priv = dev->dev_private;
2253 ret = logical_render_ring_init(dev);
2258 ret = logical_bsd_ring_init(dev);
2260 goto cleanup_render_ring;
2264 ret = logical_blt_ring_init(dev);
2266 goto cleanup_bsd_ring;
2269 if (HAS_VEBOX(dev)) {
2270 ret = logical_vebox_ring_init(dev);
2272 goto cleanup_blt_ring;
2275 if (HAS_BSD2(dev)) {
2276 ret = logical_bsd2_ring_init(dev);
2278 goto cleanup_vebox_ring;
2284 intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
2286 intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
2288 intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
2289 cleanup_render_ring:
2290 intel_logical_ring_cleanup(&dev_priv->engine[RCS]);
2296 make_rpcs(struct drm_device *dev)
2301 * No explicit RPCS request is needed to ensure full
2302 * slice/subslice/EU enablement prior to Gen9.
2304 if (INTEL_INFO(dev)->gen < 9)
2308 * Starting in Gen9, render power gating can leave
2309 * slice/subslice/EU in a partially enabled state. We
2310 * must make an explicit request through RPCS for full enablement.
2313 if (INTEL_INFO(dev)->has_slice_pg) {
2314 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
2315 rpcs |= INTEL_INFO(dev)->slice_total <<
2316 GEN8_RPCS_S_CNT_SHIFT;
2317 rpcs |= GEN8_RPCS_ENABLE;
2320 if (INTEL_INFO(dev)->has_subslice_pg) {
2321 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
2322 rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
2323 GEN8_RPCS_SS_CNT_SHIFT;
2324 rpcs |= GEN8_RPCS_ENABLE;
2327 if (INTEL_INFO(dev)->has_eu_pg) {
2328 rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
2329 GEN8_RPCS_EU_MIN_SHIFT;
2330 rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
2331 GEN8_RPCS_EU_MAX_SHIFT;
2332 rpcs |= GEN8_RPCS_ENABLE;
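/* The composed RPCS value is intended for the R_PWR_CLK_STATE entry of
 * the render engine's context image (see the CTX_R_PWR_CLK_STATE write
 * in populate_lr_context() below).
 */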
2338 static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
2340 u32 indirect_ctx_offset;
2342 switch (INTEL_INFO(engine->dev)->gen) {
2344 MISSING_CASE(INTEL_INFO(engine->dev)->gen);
2347 indirect_ctx_offset =
2348 GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2351 indirect_ctx_offset =
2352 GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2356 return indirect_ctx_offset;
2360 populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
2361 struct intel_engine_cs *engine,
2362 struct intel_ringbuffer *ringbuf)
2364 struct drm_device *dev = engine->dev;
2365 struct drm_i915_private *dev_priv = dev->dev_private;
2366 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2368 uint32_t *reg_state;
2372 ppgtt = dev_priv->mm.aliasing_ppgtt;
2374 ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
2376 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
2380 ret = i915_gem_object_get_pages(ctx_obj);
2382 DRM_DEBUG_DRIVER("Could not get object pages\n");
2386 i915_gem_object_pin_pages(ctx_obj);
2388 /* The second page of the context object contains some fields which must
2389 * be set up prior to the first execution. */
2390 page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
2391 reg_state = kmap_atomic(page);
2393 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
2394 * commands followed by (reg, value) pairs. The values we are setting here are
2395 * only for the first context restore: on a subsequent save, the GPU will
2396 * recreate this batchbuffer with new values (including all the missing
2397 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
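/* ASSIGN_CTX_REG(reg_state, pos, reg, val), used below, fills one
 * (reg, value) pair: the register offset goes to reg_state[pos] and the
 * value to reg_state[pos + 1], which is why the workaround fixups later
 * write to reg_state[CTX_RCS_INDIRECT_CTX + 1] and friends.
 */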
2398 reg_state[CTX_LRI_HEADER_0] =
2399 MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
2400 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
2401 RING_CONTEXT_CONTROL(engine),
2402 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
2403 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
2404 (HAS_RESOURCE_STREAMER(dev) ?
2405 CTX_CTRL_RS_CTX_ENABLE : 0)));
2406 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
2408 ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
2410 /* Ring buffer start address is not known until the buffer is pinned.
2411 * It is written to the context image in execlists_update_context()
2413 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
2414 RING_START(engine->mmio_base), 0);
2415 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
2416 RING_CTL(engine->mmio_base),
2417 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
2418 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
2419 RING_BBADDR_UDW(engine->mmio_base), 0);
2420 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
2421 RING_BBADDR(engine->mmio_base), 0);
2422 ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
2423 RING_BBSTATE(engine->mmio_base),
2425 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
2426 RING_SBBADDR_UDW(engine->mmio_base), 0);
2427 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
2428 RING_SBBADDR(engine->mmio_base), 0);
2429 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
2430 RING_SBBSTATE(engine->mmio_base), 0);
2431 if (engine->id == RCS) {
2432 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
2433 RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
2434 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
2435 RING_INDIRECT_CTX(engine->mmio_base), 0);
2436 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
2437 RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
2438 if (engine->wa_ctx.obj) {
2439 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
2440 uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
2442 reg_state[CTX_RCS_INDIRECT_CTX+1] =
2443 (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
2444 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
2446 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
2447 intel_lr_indirect_ctx_offset(engine) << 6;
2449 reg_state[CTX_BB_PER_CTX_PTR+1] =
2450 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
2454 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
2455 ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
2456 RING_CTX_TIMESTAMP(engine->mmio_base), 0);
2457 /* PDP values will be assigned later if needed */
2458 ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
2460 ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
2462 ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
2464 ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
2466 ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
2468 ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
2470 ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
2472 ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
2475 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
2476 /* 64b PPGTT (48bit canonical)
2477 * PDP0_DESCRIPTOR contains the base address to PML4 and
2478 * other PDP Descriptors are ignored.
2480 ASSIGN_CTX_PML4(ppgtt, reg_state);
2483 * PDP*_DESCRIPTOR contains the base address of space supported.
2484 * With dynamic page allocation, PDPs may not be allocated at
2485 * this point. Point the unallocated PDPs to the scratch page
2487 execlists_update_context_pdps(ppgtt, reg_state);
2490 if (engine->id == RCS) {
2491 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
2492 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
2496 kunmap_atomic(reg_state);
2497 i915_gem_object_unpin_pages(ctx_obj);
2503 * intel_lr_context_free() - free the LRC specific bits of a context
2504 * @ctx: the LR context to free.
2506 * The real context freeing is done in i915_gem_context_free: this only
2507 * takes care of the bits that are LRC related: the per-engine backing
2508 * objects and the logical ringbuffer.
2510 void intel_lr_context_free(struct intel_context *ctx)
2514 for (i = I915_NUM_ENGINES; --i >= 0; ) {
2515 struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
2516 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
2521 if (ctx == ctx->i915->kernel_context) {
2522 intel_unpin_ringbuffer_obj(ringbuf);
2523 i915_gem_object_ggtt_unpin(ctx_obj);
2526 WARN_ON(ctx->engine[i].pin_count);
2527 intel_ringbuffer_free(ringbuf);
2528 drm_gem_object_unreference(&ctx_obj->base);
2533 * intel_lr_context_size() - return the size of the context for an engine
2534 * @engine: which engine to find the context size for
2536 * Each engine may require a different amount of space for a context image,
2537 * so when allocating (or copying) an image, this function can be used to
2538 * find the right size for the specific engine.
2540 * Return: size (in bytes) of an engine-specific context image
2542 * Note: this size includes the HWSP, which is part of the context image
2543 * in LRC mode, but does not include the "shared data page" used with
2544 * GuC submission. The caller should account for this if using the GuC.
2546 uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
2550 WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
2552 switch (engine->id) {
2554 if (INTEL_INFO(engine->dev)->gen >= 9)
2555 ret = GEN9_LR_CONTEXT_RENDER_SIZE;
2557 ret = GEN8_LR_CONTEXT_RENDER_SIZE;
2563 ret = GEN8_LR_CONTEXT_OTHER_SIZE;
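/*
 * Usage sketch for the GuC note in the kerneldoc above (hypothetical
 * caller, not code from this file): a GuC submission path sizing its
 * copy of the context image would add the shared data page itself, e.g.
 *
 *	u32 size = intel_lr_context_size(engine) + PAGE_SIZE;
 */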
2570 static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
2571 struct drm_i915_gem_object *default_ctx_obj)
2573 struct drm_i915_private *dev_priv = engine->dev->dev_private;
2576 /* The HWSP is part of the default context object in LRC mode. */
2577 engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
2578 + LRC_PPHWSP_PN * PAGE_SIZE;
2579 page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
2580 engine->status_page.page_addr = kmap(page);
2581 engine->status_page.obj = default_ctx_obj;
2583 I915_WRITE(RING_HWS_PGA(engine->mmio_base),
2584 (u32)engine->status_page.gfx_addr);
2585 POSTING_READ(RING_HWS_PGA(engine->mmio_base));
2589 * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
2590 * @ctx: LR context to create.
2591 * @engine: engine to be used with the context.
2593 * This function can be called more than once, with different engines, if we plan
2594 * to use the context with them. The context backing objects and the ringbuffers
2595 * (especially the ringbuffer backing objects) consume a lot of memory, which is why
2596 * the creation is deferred: it's better to make sure first that we need to use
2597 * a given engine with the context.
2599 * Return: non-zero on error.
2602 int intel_lr_context_deferred_alloc(struct intel_context *ctx,
2603 struct intel_engine_cs *engine)
2605 struct drm_device *dev = engine->dev;
2606 struct drm_i915_gem_object *ctx_obj;
2607 uint32_t context_size;
2608 struct intel_ringbuffer *ringbuf;
2611 WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
2612 WARN_ON(ctx->engine[engine->id].state);
2614 context_size = round_up(intel_lr_context_size(engine), 4096);
2616 /* One extra page for the shared data between the driver and the GuC */
2617 context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2619 ctx_obj = i915_gem_alloc_object(dev, context_size);
2621 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
2625 ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
2626 if (IS_ERR(ringbuf)) {
2627 ret = PTR_ERR(ringbuf);
2628 goto error_deref_obj;
2631 ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
2633 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
2637 ctx->engine[engine->id].ringbuf = ringbuf;
2638 ctx->engine[engine->id].state = ctx_obj;
2640 if (ctx != ctx->i915->kernel_context && engine->init_context) {
2641 struct drm_i915_gem_request *req;
2643 req = i915_gem_request_alloc(engine, ctx);
2646 DRM_ERROR("ring create req: %d\n", ret);
2650 ret = engine->init_context(req);
2652 DRM_ERROR("ring init context: %d\n",
2654 i915_gem_request_cancel(req);
2657 i915_add_request_no_flush(req);
2662 intel_ringbuffer_free(ringbuf);
2664 drm_gem_object_unreference(&ctx_obj->base);
2665 ctx->engine[engine->id].ringbuf = NULL;
2666 ctx->engine[engine->id].state = NULL;
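/*
 * Usage sketch (mirroring logical_ring_init() above): for the global
 * default context the deferred allocation is followed immediately by a
 * pin, since that context must always be resident:
 *
 *	ret = intel_lr_context_deferred_alloc(dctx, engine);
 *	if (ret == 0)
 *		ret = intel_lr_context_do_pin(dctx, engine);
 */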
2670 void intel_lr_context_reset(struct drm_device *dev,
2671 struct intel_context *ctx)
2673 struct drm_i915_private *dev_priv = dev->dev_private;
2674 struct intel_engine_cs *engine;
2677 for_each_engine(engine, dev_priv, i) {
2678 struct drm_i915_gem_object *ctx_obj =
2679 ctx->engine[engine->id].state;
2680 struct intel_ringbuffer *ringbuf =
2681 ctx->engine[engine->id].ringbuf;
2682 uint32_t *reg_state;
2688 if (i915_gem_object_get_pages(ctx_obj)) {
2689 WARN(1, "Failed get_pages for context obj\n");
2692 page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
2693 reg_state = kmap_atomic(page);
2695 reg_state[CTX_RING_HEAD+1] = 0;
2696 reg_state[CTX_RING_TAIL+1] = 0;
2698 kunmap_atomic(reg_state);