2 * Copyright © 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Ben Widawsky <ben@bwidawsk.net>
25 * Michel Thierry <michel.thierry@intel.com>
26 * Thomas Daniel <thomas.daniel@intel.com>
27 * Oscar Mateo <oscar.mateo@intel.com>
32 * DOC: Logical Rings, Logical Ring Contexts and Execlists
35 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
36 * These expanded contexts enable a number of new abilities, especially
37 * "Execlists" (also implemented in this file).
39 * One of the main differences from the legacy HW contexts is that logical
40 * ring contexts incorporate many more things in the context's state, like
41 * PDPs or ringbuffer control registers:
43 * The reason why PDPs are included in the context is straightforward: as
44 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
45 * contained there means you don't need to do a ppgtt->switch_mm yourself;
46 * instead, the GPU will do it for you on the context switch.
48 * But what about the ringbuffer control registers (head, tail, etc.)?
49 * Shouldn't one set of those per engine command streamer be enough? This is
50 * where the name "Logical Rings" starts to make sense: by virtualizing the
51 * rings, the engine cs shifts to a new "ring buffer" with every context
52 * switch. When you want to submit a workload to the GPU you: A) choose your
53 * context, B) find its appropriate virtualized ring, C) write commands to it
54 * and then, finally, D) tell the GPU to switch to that context.
56 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
57 * to a context is via a context execution list, ergo "Execlists".
60 * Regarding the creation of contexts, we have:
62 * - One global default context.
63 * - One local default context for each opened fd.
64 * - One local extra context for each context create ioctl call.
66 * Now that ringbuffers belong per-context (and not per-engine, like before)
67 * and that contexts are uniquely tied to a given engine (and not reusable,
68 * like before) we need:
70 * - One ringbuffer per-engine inside each context.
71 * - One backing object per-engine inside each context.
73 * The global default context starts its life with these new objects fully
74 * allocated and populated. The local default context for each opened fd is
75 * more complex, because we don't know at creation time which engine is going
76 * to use them. To handle this, we have implemented a deferred creation of LR contexts:
79 * The local context starts its life as a hollow or blank holder, that only
80 * gets populated for a given engine once we receive an execbuffer. If later
81 * on we receive another execbuffer ioctl for the same context but a different
82 * engine, we allocate/populate a new ringbuffer and context backing object and so on.
85 * Finally, regarding local contexts created using the ioctl call: as they are
86 * only allowed with the render ring, we can allocate & populate them right
87 * away (no need to defer anything, at least for now).
89 * Execlists implementation:
90 * Execlists are the new method by which, on gen8+ hardware, workloads are
91 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
92 * This method works as follows:
94 * When a request is committed, its commands (the BB start and any leading or
95 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
96 * for the appropriate context. The tail pointer in the hardware context is not
97 * updated at this time, but instead, kept by the driver in the ringbuffer
98 * structure. A structure representing this request is added to a request queue
99 * for the appropriate engine: this structure contains a copy of the context's
100 * tail after the request was written to the ring buffer and a pointer to the context itself.
103 * If the engine's request queue was empty before the request was added, the
104 * queue is processed immediately. Otherwise the queue will be processed during
105 * a context switch interrupt. In any case, elements on the queue will get sent
106 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
107 * globally unique 20-bit submission ID.
109 * When execution of a request completes, the GPU updates the context status
110 * buffer with a context complete event and generates a context switch interrupt.
111 * During the interrupt handling, the driver examines the events in the buffer:
112 * for each context complete event, if the announced ID matches that on the head
113 * of the request queue, then that request is retired and removed from the queue.
115 * After processing, if any requests were retired and the queue is not empty
116 * then a new execution list can be submitted. The two requests at the front of
117 * the queue are next to be submitted but since a context may not occur twice in
118 * an execution list, if subsequent requests have the same ID as the first then
119 * the two requests must be combined. This is done simply by discarding requests
120 * at the head of the queue until either only one request is left (in which case
121 * we use a NULL second context) or the first two requests have unique IDs.
123 * By always executing the first two requests in the queue the driver ensures
124 * that the GPU is kept as busy as possible. In the case where a single context
125 * completes but a second context is still executing, the request for this second
126 * context will be at the head of the queue when we remove the first one. This
127 * request will then be resubmitted along with a new request for a different context,
128 * which will cause the hardware to continue executing the second request and queue
129 * the new request (the GPU detects the condition of a context getting preempted
130 * with the same context and optimizes the context switch flow by not doing
131 * preemption, but just sampling the new tail pointer).
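 *
 * As an illustration of the pairing rule above (request and context names
 * are made up): with the queue holding R1(ctx A), R2(ctx A), R3(ctx B),
 * R4(ctx C), R1 is discarded in favour of R2 (same context, later tail) and
 * the pair (R2, R3) is written to the ELSP; R4 stays queued until the next
 * context switch interrupt. If only R1(ctx A) were queued, it would be
 * submitted with a NULL second context.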
134 #include <linux/interrupt.h>
136 #include <drm/drmP.h>
137 #include <drm/i915_drm.h>
138 #include "i915_drv.h"
139 #include "intel_mocs.h"
141 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
142 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
143 #define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
145 #define RING_EXECLIST_QFULL (1 << 0x2)
146 #define RING_EXECLIST1_VALID (1 << 0x3)
147 #define RING_EXECLIST0_VALID (1 << 0x4)
148 #define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
149 #define RING_EXECLIST1_ACTIVE (1 << 0x11)
150 #define RING_EXECLIST0_ACTIVE (1 << 0x12)
152 #define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
153 #define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
154 #define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
155 #define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
156 #define GEN8_CTX_STATUS_COMPLETE (1 << 4)
157 #define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
159 #define CTX_LRI_HEADER_0 0x01
160 #define CTX_CONTEXT_CONTROL 0x02
161 #define CTX_RING_HEAD 0x04
162 #define CTX_RING_TAIL 0x06
163 #define CTX_RING_BUFFER_START 0x08
164 #define CTX_RING_BUFFER_CONTROL 0x0a
165 #define CTX_BB_HEAD_U 0x0c
166 #define CTX_BB_HEAD_L 0x0e
167 #define CTX_BB_STATE 0x10
168 #define CTX_SECOND_BB_HEAD_U 0x12
169 #define CTX_SECOND_BB_HEAD_L 0x14
170 #define CTX_SECOND_BB_STATE 0x16
171 #define CTX_BB_PER_CTX_PTR 0x18
172 #define CTX_RCS_INDIRECT_CTX 0x1a
173 #define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
174 #define CTX_LRI_HEADER_1 0x21
175 #define CTX_CTX_TIMESTAMP 0x22
176 #define CTX_PDP3_UDW 0x24
177 #define CTX_PDP3_LDW 0x26
178 #define CTX_PDP2_UDW 0x28
179 #define CTX_PDP2_LDW 0x2a
180 #define CTX_PDP1_UDW 0x2c
181 #define CTX_PDP1_LDW 0x2e
182 #define CTX_PDP0_UDW 0x30
183 #define CTX_PDP0_LDW 0x32
184 #define CTX_LRI_HEADER_2 0x41
185 #define CTX_R_PWR_CLK_STATE 0x42
186 #define CTX_GPGPU_CSR_BASE_ADDRESS 0x44
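
/*
 * Note: the CTX_* values above are dword indices into the logical ring
 * context image, and each register occupies an (offset, value) pair. That is
 * why the code below writes e.g. reg_state[CTX_RING_TAIL + 1] for the value,
 * while ASSIGN_CTX_REG() puts the MMIO offset in reg_state[pos + 0].
 */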
188 #define GEN8_CTX_VALID (1<<0)
189 #define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
190 #define GEN8_CTX_FORCE_RESTORE (1<<2)
191 #define GEN8_CTX_L3LLC_COHERENT (1<<5)
192 #define GEN8_CTX_PRIVILEGE (1<<8)
194 #define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
195 (reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
196 (reg_state)[(pos)+1] = (val); \
199 #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
200 const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
201 reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
202 reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
205 #define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
206 reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
207 reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
211 ADVANCED_CONTEXT = 0,
216 #define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
217 #define GEN8_CTX_ADDRESSING_MODE(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
218 LEGACY_64B_CONTEXT :\
222 FAULT_AND_HALT, /* Debug only */
224 FAULT_AND_CONTINUE /* Unsupported */
226 #define GEN8_CTX_ID_SHIFT 32
227 #define GEN8_CTX_ID_WIDTH 21
228 #define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
229 #define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
231 /* Typical size of the average request (2 pipecontrols and a MI_BB) */
232 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */
234 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
235 struct intel_engine_cs *engine);
236 static int intel_lr_context_pin(struct i915_gem_context *ctx,
237 struct intel_engine_cs *engine);
240 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
241 * @dev_priv: i915 device private
242 * @enable_execlists: value of i915.enable_execlists module parameter.
244 * Only certain platforms support Execlists (the prerequisites being
245 * support for Logical Ring Contexts and Aliasing PPGTT or better).
247 * Return: 1 if Execlists is supported and has to be enabled.
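 *
 * Illustrative summary of the checks below (not exhaustive):
 *
 *	vGPU on an execlist-capable platform	-> 1 (forced on)
 *	Gen9 or later				-> 1 (forced on)
 *	i915.enable_execlists == 0		-> 0 (forced off)
 *	otherwise				-> 1 only if logical ring contexts,
 *						   PPGTT and i915.use_mmio_flip >= 0
 *						   all hold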
249 int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
251 /* On platforms with execlists available, vGPU will only
252 * support execlist mode, not ring buffer mode.
254 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
257 if (INTEL_GEN(dev_priv) >= 9)
260 if (enable_execlists == 0)
263 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
264 USES_PPGTT(dev_priv) &&
265 i915.use_mmio_flip >= 0)
272 logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
274 struct drm_i915_private *dev_priv = engine->i915;
276 if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
277 engine->idle_lite_restore_wa = ~0;
279 engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
280 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
281 (engine->id == VCS || engine->id == VCS2);
283 engine->ctx_desc_template = GEN8_CTX_VALID;
284 engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
285 GEN8_CTX_ADDRESSING_MODE_SHIFT;
286 if (IS_GEN8(dev_priv))
287 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
288 engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
290 /* TODO: WaDisableLiteRestore when we start using semaphore
291 * signalling between Command Streamers */
292 /* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */
294 /* WaEnableForceRestoreInCtxtDescForVCS:skl */
295 /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
296 if (engine->disable_lite_restore_wa)
297 engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
301 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
302 * for a pinned context
304 * @ctx: Context to work on
305 * @engine: Engine the descriptor will be used with
307 * The context descriptor encodes various attributes of a context,
308 * including its GTT address and some flags. Because it's fairly
309 * expensive to calculate, we'll just do it once and cache the result,
310 * which remains valid until the context is unpinned.
312 * This is what a descriptor looks like, from LSB to MSB:
313 * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
314 * bits 12-31: LRCA, GTT address of (the HWSP of) this context
315 * bits 32-52: ctx ID, a globally unique tag
316 * bits 53-54: mbz, reserved for use by hardware
317 * bits 55-63: group ID, currently unused and set to 0
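 *
 * A worked example with made-up numbers: on a non-48-bit gen9 platform the
 * template holds GEN8_CTX_VALID | (LEGACY_32B_CONTEXT << 3) |
 * GEN8_CTX_PRIVILEGE (assuming LEGACY_32B_CONTEXT == 1, as in the addressing
 * mode enum above). With an LRCA of 0x00123000 and hw_id 7:
 *
 *	desc  = 0x109;				// flags, bits 0-11
 *	desc |= 0x00123000;			// LRCA, bits 12-31
 *	desc |= (u64)7 << GEN8_CTX_ID_SHIFT;	// ctx ID, bits 32-52
 *	// desc == 0x0000000700123109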
320 intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
321 struct intel_engine_cs *engine)
323 struct intel_context *ce = &ctx->engine[engine->id];
326 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
328 desc = engine->ctx_desc_template; /* bits 0-11 */
329 desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
331 desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
336 uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
337 struct intel_engine_cs *engine)
339 return ctx->engine[engine->id].lrc_desc;
342 static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
343 struct drm_i915_gem_request *rq1)
346 struct intel_engine_cs *engine = rq0->engine;
347 struct drm_i915_private *dev_priv = rq0->i915;
351 desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
352 rq1->elsp_submitted++;
357 desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
358 rq0->elsp_submitted++;
360 /* You must always write both descriptors in the order below. */
361 I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
362 I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
364 I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
365 /* The context is automatically loaded after the following */
366 I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
368 /* ELSP is a write-only register, use another nearby reg for posting */
369 POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
373 execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
375 ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
376 ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
377 ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
378 ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
381 static void execlists_update_context(struct drm_i915_gem_request *rq)
383 struct intel_engine_cs *engine = rq->engine;
384 struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
385 uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
387 reg_state[CTX_RING_TAIL+1] = rq->tail;
389 /* True 32b PPGTT with dynamic page allocation: update PDP
390 * registers and point the unallocated PDPs to scratch page.
391 * PML4 is allocated during ppgtt init, so this is not needed in 48-bit full PPGTT.
394 if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
395 execlists_update_context_pdps(ppgtt, reg_state);
398 static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
399 struct drm_i915_gem_request *rq1)
401 struct drm_i915_private *dev_priv = rq0->i915;
402 unsigned int fw_domains = rq0->engine->fw_domains;
404 execlists_update_context(rq0);
407 execlists_update_context(rq1);
409 spin_lock_irq(&dev_priv->uncore.lock);
410 intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
412 execlists_elsp_write(rq0, rq1);
414 intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
415 spin_unlock_irq(&dev_priv->uncore.lock);
418 static void execlists_context_unqueue(struct intel_engine_cs *engine)
420 struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
421 struct drm_i915_gem_request *cursor, *tmp;
423 assert_spin_locked(&engine->execlist_lock);
426 * If irqs are not active, generate a warning, as batches that finish
427 * without them may get lost and a GPU hang may occur.
429 WARN_ON(!intel_irqs_enabled(engine->i915));
431 /* Try to read in pairs */
432 list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
436 } else if (req0->ctx == cursor->ctx) {
437 /* Same ctx: ignore first request, as second request
438 * will update tail past first request's workload */
439 cursor->elsp_submitted = req0->elsp_submitted;
440 list_del(&req0->execlist_link);
441 i915_gem_request_unreference(req0);
445 WARN_ON(req1->elsp_submitted);
453 if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
455 * WaIdleLiteRestore: make sure we never cause a lite restore with HEAD==TAIL.
458 * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we
459 * resubmit the request. See gen8_emit_request() for where we
460 * prepare the padding after the end of the request.
462 struct intel_ringbuffer *ringbuf;
464 ringbuf = req0->ctx->engine[engine->id].ringbuf;
466 req0->tail &= ringbuf->size - 1;
469 execlists_submit_requests(req0, req1);
473 execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
475 struct drm_i915_gem_request *head_req;
477 assert_spin_locked(&engine->execlist_lock);
479 head_req = list_first_entry_or_null(&engine->execlist_queue,
480 struct drm_i915_gem_request,
483 if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
486 WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
488 if (--head_req->elsp_submitted > 0)
491 list_del(&head_req->execlist_link);
492 i915_gem_request_unreference(head_req);
498 get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
501 struct drm_i915_private *dev_priv = engine->i915;
504 read_pointer %= GEN8_CSB_ENTRIES;
506 status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));
508 if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
511 *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
518 * intel_lrc_irq_handler() - handle Context Switch interrupts
519 * @data: engine pointer, passed to the tasklet handler as an unsigned long
521 * Check the unread Context Status Buffers and manage the submission of new
522 * contexts to the ELSP accordingly.
524 static void intel_lrc_irq_handler(unsigned long data)
526 struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
527 struct drm_i915_private *dev_priv = engine->i915;
529 unsigned int read_pointer, write_pointer;
530 u32 csb[GEN8_CSB_ENTRIES][2];
531 unsigned int csb_read = 0, i;
532 unsigned int submit_contexts = 0;
534 intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
536 status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
538 read_pointer = engine->next_context_status_buffer;
539 write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
540 if (read_pointer > write_pointer)
541 write_pointer += GEN8_CSB_ENTRIES;
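	/*
	 * Illustrative numbers: with GEN8_CSB_ENTRIES == 6, if we last left
	 * off at read_pointer == 4 and the HW write pointer now reads 1, the
	 * unwrapped write_pointer becomes 7 and the loop below consumes
	 * entries 5, 0 and 1 (get_context_status() applies the modulo).
	 */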
543 while (read_pointer < write_pointer) {
544 if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
546 csb[csb_read][0] = get_context_status(engine, ++read_pointer,
551 engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
553 /* Update the read pointer to the old write pointer. Manual ringbuffer
554 * management ftw </sarcasm> */
555 I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
556 _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
557 engine->next_context_status_buffer << 8));
559 intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
561 spin_lock(&engine->execlist_lock);
563 for (i = 0; i < csb_read; i++) {
564 if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
565 if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
566 if (execlists_check_remove_request(engine, csb[i][1]))
567 WARN(1, "Lite Restored request removed from queue\n");
569 WARN(1, "Preemption without Lite Restore\n");
572 if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
573 GEN8_CTX_STATUS_ELEMENT_SWITCH))
575 execlists_check_remove_request(engine, csb[i][1]);
578 if (submit_contexts) {
579 if (!engine->disable_lite_restore_wa ||
580 (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
581 execlists_context_unqueue(engine);
584 spin_unlock(&engine->execlist_lock);
586 if (unlikely(submit_contexts > 2))
587 DRM_ERROR("More than two context complete events?\n");
590 static void execlists_context_queue(struct drm_i915_gem_request *request)
592 struct intel_engine_cs *engine = request->engine;
593 struct drm_i915_gem_request *cursor;
594 int num_elements = 0;
596 spin_lock_bh(&engine->execlist_lock);
598 list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
599 if (++num_elements > 2)
602 if (num_elements > 2) {
603 struct drm_i915_gem_request *tail_req;
605 tail_req = list_last_entry(&engine->execlist_queue,
606 struct drm_i915_gem_request,
609 if (request->ctx == tail_req->ctx) {
610 WARN(tail_req->elsp_submitted != 0,
611 "More than 2 already-submitted reqs queued\n");
612 list_del(&tail_req->execlist_link);
613 i915_gem_request_unreference(tail_req);
617 i915_gem_request_reference(request);
618 list_add_tail(&request->execlist_link, &engine->execlist_queue);
619 request->ctx_hw_id = request->ctx->hw_id;
620 if (num_elements == 0)
621 execlists_context_unqueue(engine);
623 spin_unlock_bh(&engine->execlist_lock);
626 static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
628 struct intel_engine_cs *engine = req->engine;
629 uint32_t flush_domains;
633 if (engine->gpu_caches_dirty)
634 flush_domains = I915_GEM_GPU_DOMAINS;
636 ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
640 engine->gpu_caches_dirty = false;
644 static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
645 struct list_head *vmas)
647 const unsigned other_rings = ~intel_engine_flag(req->engine);
648 struct i915_vma *vma;
649 uint32_t flush_domains = 0;
650 bool flush_chipset = false;
653 list_for_each_entry(vma, vmas, exec_list) {
654 struct drm_i915_gem_object *obj = vma->obj;
656 if (obj->active & other_rings) {
657 ret = i915_gem_object_sync(obj, req->engine, &req);
662 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
663 flush_chipset |= i915_gem_clflush_object(obj, false);
665 flush_domains |= obj->base.write_domain;
668 if (flush_domains & I915_GEM_DOMAIN_GTT)
671 /* Unconditionally invalidate gpu caches and ensure that we do flush
672 * any residual writes from the previous batch.
674 return logical_ring_invalidate_all_caches(req);
677 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
679 struct intel_engine_cs *engine = request->engine;
680 struct intel_context *ce = &request->ctx->engine[engine->id];
683 /* Flush enough space to reduce the likelihood of waiting after
684 * we start building the request - in which case we will just
685 * have to repeat work.
687 request->reserved_space += EXECLISTS_REQUEST_SIZE;
690 ret = execlists_context_deferred_alloc(request->ctx, engine);
695 request->ringbuf = ce->ringbuf;
697 if (i915.enable_guc_submission) {
699 * Check that the GuC has space for the request before
700 * going any further, as the i915_add_request() call
701 * later on mustn't fail ...
703 ret = i915_guc_wq_check_space(request);
708 ret = intel_lr_context_pin(request->ctx, engine);
712 ret = intel_ring_begin(request, 0);
716 if (!ce->initialised) {
717 ret = engine->init_context(request);
721 ce->initialised = true;
724 /* Note that after this point, we have committed to using
725 * this request as it is used to track both the
726 * state of engine initialisation and the liveness of the
727 * golden renderstate above. Think twice before you try
728 * to cancel/unwind this request now.
731 request->reserved_space -= EXECLISTS_REQUEST_SIZE;
735 intel_lr_context_unpin(request->ctx, engine);
740 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
741 * @request: Request to advance the logical ringbuffer of.
743 * The tail is updated in our logical ringbuffer struct, not in the actual context. What
744 * really happens during submission is that the context and current tail will be placed
745 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
746 * point, the tail *inside* the context is updated and the ELSP written to.
749 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
751 struct intel_ringbuffer *ringbuf = request->ringbuf;
752 struct intel_engine_cs *engine = request->engine;
754 intel_logical_ring_advance(ringbuf);
755 request->tail = ringbuf->tail;
758 * Here we add two extra NOOPs as padding to avoid
759 * lite restore of a context with HEAD==TAIL.
761 * Caller must reserve WA_TAIL_DWORDS for us!
763 intel_logical_ring_emit(ringbuf, MI_NOOP);
764 intel_logical_ring_emit(ringbuf, MI_NOOP);
765 intel_logical_ring_advance(ringbuf);
767 if (intel_engine_stopped(engine))
770 /* We keep the previous context alive until we retire the following
771 * request. This ensures that the context object is still pinned
772 * for any residual writes the HW makes into it on the context switch
773 * into the next object following the breadcrumb. Otherwise, we may
774 * retire the context too early.
776 request->previous_context = engine->last_context;
777 engine->last_context = request->ctx;
779 if (i915.enable_guc_submission)
780 i915_guc_submit(request);
782 execlists_context_queue(request);
788 * execlists_submission() - submit a batchbuffer for execution, Execlists style
789 * @params: execbuffer call parameters.
790 * @args: execbuffer call arguments.
791 * @vmas: list of vmas.
793 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
794 * away the submission details of the execbuffer ioctl call.
796 * Return: non-zero if the submission fails.
798 int intel_execlists_submission(struct i915_execbuffer_params *params,
799 struct drm_i915_gem_execbuffer2 *args,
800 struct list_head *vmas)
802 struct drm_device *dev = params->dev;
803 struct intel_engine_cs *engine = params->engine;
804 struct drm_i915_private *dev_priv = dev->dev_private;
805 struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
811 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
812 instp_mask = I915_EXEC_CONSTANTS_MASK;
813 switch (instp_mode) {
814 case I915_EXEC_CONSTANTS_REL_GENERAL:
815 case I915_EXEC_CONSTANTS_ABSOLUTE:
816 case I915_EXEC_CONSTANTS_REL_SURFACE:
817 if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
818 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
822 if (instp_mode != dev_priv->relative_constants_mode) {
823 if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
824 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
828 /* The HW changed the meaning of this bit on gen6 */
829 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
833 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
837 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
838 DRM_DEBUG("sol reset is gen7 only\n");
842 ret = execlists_move_to_gpu(params->request, vmas);
846 if (engine == &dev_priv->engine[RCS] &&
847 instp_mode != dev_priv->relative_constants_mode) {
848 ret = intel_ring_begin(params->request, 4);
852 intel_logical_ring_emit(ringbuf, MI_NOOP);
853 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
854 intel_logical_ring_emit_reg(ringbuf, INSTPM);
855 intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
856 intel_logical_ring_advance(ringbuf);
858 dev_priv->relative_constants_mode = instp_mode;
861 exec_start = params->batch_obj_vm_offset +
862 args->batch_start_offset;
864 ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
868 trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
870 i915_gem_execbuffer_move_to_active(vmas, params->request);
875 void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
877 struct drm_i915_gem_request *req, *tmp;
878 LIST_HEAD(cancel_list);
880 WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex));
882 spin_lock_bh(&engine->execlist_lock);
883 list_replace_init(&engine->execlist_queue, &cancel_list);
884 spin_unlock_bh(&engine->execlist_lock);
886 list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
887 list_del(&req->execlist_link);
888 i915_gem_request_unreference(req);
892 void intel_logical_ring_stop(struct intel_engine_cs *engine)
894 struct drm_i915_private *dev_priv = engine->i915;
897 if (!intel_engine_initialized(engine))
900 ret = intel_engine_idle(engine);
902 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
905 /* TODO: Is this correct with Execlists enabled? */
906 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
907 if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
908 DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
911 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
914 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
916 struct intel_engine_cs *engine = req->engine;
919 if (!engine->gpu_caches_dirty)
922 ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
926 engine->gpu_caches_dirty = false;
930 static int intel_lr_context_pin(struct i915_gem_context *ctx,
931 struct intel_engine_cs *engine)
933 struct drm_i915_private *dev_priv = ctx->i915;
934 struct intel_context *ce = &ctx->engine[engine->id];
939 lockdep_assert_held(&ctx->i915->dev->struct_mutex);
944 ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
945 PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
949 vaddr = i915_gem_object_pin_map(ce->state);
951 ret = PTR_ERR(vaddr);
955 lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
957 ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
961 i915_gem_context_reference(ctx);
962 ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
963 intel_lr_context_descriptor_update(ctx, engine);
965 lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
966 ce->lrc_reg_state = lrc_reg_state;
967 ce->state->dirty = true;
969 /* Invalidate GuC TLB. */
970 if (i915.enable_guc_submission)
971 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
976 i915_gem_object_unpin_map(ce->state);
978 i915_gem_object_ggtt_unpin(ce->state);
984 void intel_lr_context_unpin(struct i915_gem_context *ctx,
985 struct intel_engine_cs *engine)
987 struct intel_context *ce = &ctx->engine[engine->id];
989 lockdep_assert_held(&ctx->i915->dev->struct_mutex);
990 GEM_BUG_ON(ce->pin_count == 0);
995 intel_unpin_ringbuffer_obj(ce->ringbuf);
997 i915_gem_object_unpin_map(ce->state);
998 i915_gem_object_ggtt_unpin(ce->state);
1002 ce->lrc_reg_state = NULL;
1004 i915_gem_context_unreference(ctx);
1007 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
1010 struct intel_engine_cs *engine = req->engine;
1011 struct intel_ringbuffer *ringbuf = req->ringbuf;
1012 struct i915_workarounds *w = &req->i915->workarounds;
1017 engine->gpu_caches_dirty = true;
1018 ret = logical_ring_flush_all_caches(req);
1022 ret = intel_ring_begin(req, w->count * 2 + 2);
1026 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
1027 for (i = 0; i < w->count; i++) {
1028 intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
1029 intel_logical_ring_emit(ringbuf, w->reg[i].value);
1031 intel_logical_ring_emit(ringbuf, MI_NOOP);
1033 intel_logical_ring_advance(ringbuf);
1035 engine->gpu_caches_dirty = true;
1036 ret = logical_ring_flush_all_caches(req);
1043 #define wa_ctx_emit(batch, index, cmd) \
1045 int __index = (index)++; \
1046 if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
1049 batch[__index] = (cmd); \
1052 #define wa_ctx_emit_reg(batch, index, reg) \
1053 wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
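
/*
 * A minimal usage sketch (mirroring the helpers below): each wa_ctx_emit()
 * stores one dword into the WA batch page and advances 'index', so emitting
 * an LRI to a single register costs three dwords:
 *
 *	wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
 *	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
 *	wa_ctx_emit(batch, index, value);
 */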
1056 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
1057 * PIPE_CONTROL instruction. This is required for the flush to happen correctly
1058 * but there is a slight complication: this is applied in a WA batch where the
1059 * values are only initialized once, so we cannot read the register value at the
1060 * beginning and reuse it later; hence we save its value to memory, upload a
1061 * constant value with bit21 set and then we restore it back with the saved value.
1062 * To simplify the WA, a constant value is formed by using the default value
1063 * of this register. This shouldn't be a problem because we are only modifying
1064 * it for a short period and this batch is non-preemptible. We could, of course,
1065 * use additional instructions that read the actual value of the register
1066 * at that time and set our bit of interest, but that makes the WA more complicated.
1068 * This WA is also required for Gen9, so extracting it as a function avoids code duplication.
1071 static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
1072 uint32_t *const batch,
1075 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
1078 * WaDisableLSQCROPERFforOCL:skl
1079 * This WA is implemented in skl_init_clock_gating() but since
1080 * this batch updates GEN8_L3SQCREG4 with default value we need to
1081 * set this bit here to retain the WA during flush.
1083 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0))
1084 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
1086 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
1087 MI_SRM_LRM_GLOBAL_GTT));
1088 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1089 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
1090 wa_ctx_emit(batch, index, 0);
1092 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1093 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1094 wa_ctx_emit(batch, index, l3sqc4_flush);
1096 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1097 wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
1098 PIPE_CONTROL_DC_FLUSH_ENABLE));
1099 wa_ctx_emit(batch, index, 0);
1100 wa_ctx_emit(batch, index, 0);
1101 wa_ctx_emit(batch, index, 0);
1102 wa_ctx_emit(batch, index, 0);
1104 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
1105 MI_SRM_LRM_GLOBAL_GTT));
1106 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
1107 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
1108 wa_ctx_emit(batch, index, 0);
1113 static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
1115 uint32_t start_alignment)
1117 return wa_ctx->offset = ALIGN(offset, start_alignment);
1120 static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
1122 uint32_t size_alignment)
1124 wa_ctx->size = offset - wa_ctx->offset;
1126 WARN(wa_ctx->size % size_alignment,
1127 "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
1128 wa_ctx->size, size_alignment);
1133 * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
1135 * @engine: only applicable for RCS
1136 * @wa_ctx: structure representing wa_ctx
1137 *   offset: specifies the start of the batch, should be cache-aligned. This is
1138 *   updated with the offset value received as input.
1139 *   size: size of the batch in DWORDS, but the HW expects it in terms of cachelines
1140 * @batch: page in which the WAs are loaded
1141 * @offset: this field specifies the start of the batch; it should be
1142 * cache-aligned, otherwise it is adjusted accordingly.
1143 * Typically we only have one indirect_ctx and per_ctx batch buffer which are
1144 * initialized at the beginning and shared across all contexts but this field
1145 * helps us to have multiple batches at different offsets and select them based
1146 * on some criteria. At the moment this batch always starts at the beginning of the page
1147 * and at this point we don't have multiple wa_ctx batch buffers.
1149 * The number of WAs applied is not known at the beginning; we use this field
1150 * to return the number of DWORDS written.
1152 * Note that this batch does not contain MI_BATCH_BUFFER_END,
1153 * so it adds NOOPs as padding to make it cacheline aligned.
1154 * MI_BATCH_BUFFER_END will be added to the per-ctx batch and both of them together
1155 * make a complete batch buffer.
1157 * Return: non-zero if we exceed the PAGE_SIZE limit.
1160 static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
1161 struct i915_wa_ctx_bb *wa_ctx,
1162 uint32_t *const batch,
1165 uint32_t scratch_addr;
1166 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1168 /* WaDisableCtxRestoreArbitration:bdw,chv */
1169 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1171 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
1172 if (IS_BROADWELL(engine->i915)) {
1173 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1179 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
1180 /* Actual scratch location is at 128 bytes offset */
1181 scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
1183 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1184 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1185 PIPE_CONTROL_GLOBAL_GTT_IVB |
1186 PIPE_CONTROL_CS_STALL |
1187 PIPE_CONTROL_QW_WRITE));
1188 wa_ctx_emit(batch, index, scratch_addr);
1189 wa_ctx_emit(batch, index, 0);
1190 wa_ctx_emit(batch, index, 0);
1191 wa_ctx_emit(batch, index, 0);
1193 /* Pad to end of cacheline */
1194 while (index % CACHELINE_DWORDS)
1195 wa_ctx_emit(batch, index, MI_NOOP);
1198 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
1199 * execution depends on the length specified in terms of cache lines
1200 * in the register CTX_RCS_INDIRECT_CTX
1203 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1207 * gen8_init_perctx_bb() - initialize per ctx batch with WA
1209 * @engine: only applicable for RCS
1210 * @wa_ctx: structure representing wa_ctx
1211 *   offset: specifies the start of the batch, should be cache-aligned.
1212 *   size: size of the batch in DWORDS, but the HW expects it in terms of cachelines
1213 * @batch: page in which WA are loaded
1214 * @offset: This field specifies the start of this batch.
1215 * This batch is started immediately after indirect_ctx batch. Since we ensure
1216 * that indirect_ctx ends on a cacheline this batch is aligned automatically.
1218 * The number of DWORDS written is returned using this field.
1220 * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
1221 * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
1223 static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
1224 struct i915_wa_ctx_bb *wa_ctx,
1225 uint32_t *const batch,
1228 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1230 /* WaDisableCtxRestoreArbitration:bdw,chv */
1231 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1233 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1235 return wa_ctx_end(wa_ctx, *offset = index, 1);
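
/*
 * Layout sketch with made-up sizes: if gen8_init_indirectctx_bb() ends up
 * emitting 58 dwords, it is MI_NOOP-padded to 64 (CACHELINE_DWORDS being 16),
 * so the indirect ctx descriptor records { .offset = 0, .size = 64 }; the
 * per-ctx batch built just above then starts cacheline-aligned at dword 64
 * and needs only the two dwords it emits (MI_ARB_ON_OFF and
 * MI_BATCH_BUFFER_END).
 */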
1238 static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
1239 struct i915_wa_ctx_bb *wa_ctx,
1240 uint32_t *const batch,
1244 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1246 /* WaDisableCtxRestoreArbitration:skl,bxt */
1247 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1248 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
1249 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1251 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
1252 ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1257 /* Pad to end of cacheline */
1258 while (index % CACHELINE_DWORDS)
1259 wa_ctx_emit(batch, index, MI_NOOP);
1261 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1264 static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1265 struct i915_wa_ctx_bb *wa_ctx,
1266 uint32_t *const batch,
1269 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1271 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
1272 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
1273 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
1274 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1275 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
1276 wa_ctx_emit(batch, index,
1277 _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
1278 wa_ctx_emit(batch, index, MI_NOOP);
1281 /* WaClearTdlStateAckDirtyBits:bxt */
1282 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
1283 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
1285 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
1286 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1288 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
1289 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1291 wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
1292 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
1294 wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
1295 /* dummy write to CS, mask bits are 0 to ensure the register is not modified */
1296 wa_ctx_emit(batch, index, 0x0);
1297 wa_ctx_emit(batch, index, MI_NOOP);
1300 /* WaDisableCtxRestoreArbitration:skl,bxt */
1301 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1302 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
1303 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1305 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
1307 return wa_ctx_end(wa_ctx, *offset = index, 1);
1310 static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
1314 engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev,
1316 if (IS_ERR(engine->wa_ctx.obj)) {
1317 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
1318 ret = PTR_ERR(engine->wa_ctx.obj);
1319 engine->wa_ctx.obj = NULL;
1323 ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
1325 DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
1327 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
1334 static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
1336 if (engine->wa_ctx.obj) {
1337 i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
1338 drm_gem_object_unreference(&engine->wa_ctx.obj->base);
1339 engine->wa_ctx.obj = NULL;
1343 static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1349 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
1351 WARN_ON(engine->id != RCS);
1353 /* update this when WA for higher Gen are added */
1354 if (INTEL_GEN(engine->i915) > 9) {
1355 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
1356 INTEL_GEN(engine->i915));
1360 /* some WA perform writes to scratch page, ensure it is valid */
1361 if (engine->scratch.obj == NULL) {
1362 DRM_ERROR("scratch page not allocated for %s\n", engine->name);
1366 ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
1368 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1372 page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
1373 batch = kmap_atomic(page);
1376 if (IS_GEN8(engine->i915)) {
1377 ret = gen8_init_indirectctx_bb(engine,
1378 &wa_ctx->indirect_ctx,
1384 ret = gen8_init_perctx_bb(engine,
1390 } else if (IS_GEN9(engine->i915)) {
1391 ret = gen9_init_indirectctx_bb(engine,
1392 &wa_ctx->indirect_ctx,
1398 ret = gen9_init_perctx_bb(engine,
1407 kunmap_atomic(batch);
1409 lrc_destroy_wa_ctx_obj(engine);
1414 static void lrc_init_hws(struct intel_engine_cs *engine)
1416 struct drm_i915_private *dev_priv = engine->i915;
1418 I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1419 (u32)engine->status_page.gfx_addr);
1420 POSTING_READ(RING_HWS_PGA(engine->mmio_base));
1423 static int gen8_init_common_ring(struct intel_engine_cs *engine)
1425 struct drm_i915_private *dev_priv = engine->i915;
1426 unsigned int next_context_status_buffer_hw;
1428 lrc_init_hws(engine);
1430 I915_WRITE_IMR(engine,
1431 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1432 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
1434 I915_WRITE(RING_MODE_GEN7(engine),
1435 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1436 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1437 POSTING_READ(RING_MODE_GEN7(engine));
1440 * Instead of resetting the Context Status Buffer (CSB) read pointer to
1441 * zero, we need to read the write pointer from hardware and use its
1442 * value because "this register is power context save restored".
1443 * Effectively, these states have been observed:
1445 * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
1446 * BDW | CSB regs not reset | CSB regs reset |
1447 * CHT | CSB regs not reset | CSB regs not reset |
1451 next_context_status_buffer_hw =
1452 GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
1455 * When the CSB registers are reset (also after power-up / gpu reset),
1456 * CSB write pointer is set to all 1's, which is not valid, use '5' in
1457 * this special case, so the first element read is CSB[0].
1459 if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
1460 next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
1462 engine->next_context_status_buffer = next_context_status_buffer_hw;
1463 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
1465 intel_engine_init_hangcheck(engine);
1467 return intel_mocs_init_engine(engine);
1470 static int gen8_init_render_ring(struct intel_engine_cs *engine)
1472 struct drm_i915_private *dev_priv = engine->i915;
1475 ret = gen8_init_common_ring(engine);
1479 /* We need to disable the AsyncFlip performance optimisations in order
1480 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1481 * programmed to '1' on all products.
1483 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1485 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1487 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1489 return init_workarounds_ring(engine);
1492 static int gen9_init_render_ring(struct intel_engine_cs *engine)
1496 ret = gen8_init_common_ring(engine);
1500 return init_workarounds_ring(engine);
1503 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1505 struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
1506 struct intel_engine_cs *engine = req->engine;
1507 struct intel_ringbuffer *ringbuf = req->ringbuf;
1508 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
1511 ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
1515 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
1516 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
1517 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1519 intel_logical_ring_emit_reg(ringbuf,
1520 GEN8_RING_PDP_UDW(engine, i));
1521 intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
1522 intel_logical_ring_emit_reg(ringbuf,
1523 GEN8_RING_PDP_LDW(engine, i));
1524 intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
1527 intel_logical_ring_emit(ringbuf, MI_NOOP);
1528 intel_logical_ring_advance(ringbuf);
1533 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1534 u64 offset, unsigned dispatch_flags)
1536 struct intel_ringbuffer *ringbuf = req->ringbuf;
1537 bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
1540 /* Don't rely on the hw updating PDPs, especially in lite-restore.
1541 * Ideally, we should set Force PD Restore in ctx descriptor,
1542 * but we can't. Force Restore would be a second option, but
1543 * it is unsafe in case of lite-restore (because the ctx is
1544 * not idle). PML4 is allocated during ppgtt init so this is
1545 * not needed in 48-bit.*/
1546 if (req->ctx->ppgtt &&
1547 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
1548 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
1549 !intel_vgpu_active(req->i915)) {
1550 ret = intel_logical_ring_emit_pdps(req);
1555 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
1558 ret = intel_ring_begin(req, 4);
1562 /* FIXME(BDW): Address space and security selectors. */
1563 intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
1565 (dispatch_flags & I915_DISPATCH_RS ?
1566 MI_BATCH_RESOURCE_STREAMER : 0));
1567 intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
1568 intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
1569 intel_logical_ring_emit(ringbuf, MI_NOOP);
1570 intel_logical_ring_advance(ringbuf);
1575 static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
1577 struct drm_i915_private *dev_priv = engine->i915;
1578 unsigned long flags;
1580 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1583 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1584 if (engine->irq_refcount++ == 0) {
1585 I915_WRITE_IMR(engine,
1586 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1587 POSTING_READ(RING_IMR(engine->mmio_base));
1589 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1594 static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
1596 struct drm_i915_private *dev_priv = engine->i915;
1597 unsigned long flags;
1599 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1600 if (--engine->irq_refcount == 0) {
1601 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1602 POSTING_READ(RING_IMR(engine->mmio_base));
1604 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1607 static int gen8_emit_flush(struct drm_i915_gem_request *request,
1608 u32 invalidate_domains,
1611 struct intel_ringbuffer *ringbuf = request->ringbuf;
1612 struct intel_engine_cs *engine = ringbuf->engine;
1613 struct drm_i915_private *dev_priv = request->i915;
1617 ret = intel_ring_begin(request, 4);
1621 cmd = MI_FLUSH_DW + 1;
1623 /* We always require a command barrier so that subsequent
1624 * commands, such as breadcrumb interrupts, are strictly ordered
1625 * wrt the contents of the write cache being flushed to memory
1626 * (and thus being coherent from the CPU).
1628 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1630 if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
1631 cmd |= MI_INVALIDATE_TLB;
1632 if (engine == &dev_priv->engine[VCS])
1633 cmd |= MI_INVALIDATE_BSD;
1636 intel_logical_ring_emit(ringbuf, cmd);
1637 intel_logical_ring_emit(ringbuf,
1638 I915_GEM_HWS_SCRATCH_ADDR |
1639 MI_FLUSH_DW_USE_GTT);
1640 intel_logical_ring_emit(ringbuf, 0); /* upper addr */
1641 intel_logical_ring_emit(ringbuf, 0); /* value */
1642 intel_logical_ring_advance(ringbuf);
1647 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1648 u32 invalidate_domains,
1651 struct intel_ringbuffer *ringbuf = request->ringbuf;
1652 struct intel_engine_cs *engine = ringbuf->engine;
1653 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
1654 bool vf_flush_wa = false;
1658 flags |= PIPE_CONTROL_CS_STALL;
1660 if (flush_domains) {
1661 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1662 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
1663 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
1664 flags |= PIPE_CONTROL_FLUSH_ENABLE;
1667 if (invalidate_domains) {
1668 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1669 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1670 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1671 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1672 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1673 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1674 flags |= PIPE_CONTROL_QW_WRITE;
1675 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
1678 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL pipe control first.
1681 if (IS_GEN9(request->i915))
1685 ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6);
1690 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1691 intel_logical_ring_emit(ringbuf, 0);
1692 intel_logical_ring_emit(ringbuf, 0);
1693 intel_logical_ring_emit(ringbuf, 0);
1694 intel_logical_ring_emit(ringbuf, 0);
1695 intel_logical_ring_emit(ringbuf, 0);
1698 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1699 intel_logical_ring_emit(ringbuf, flags);
1700 intel_logical_ring_emit(ringbuf, scratch_addr);
1701 intel_logical_ring_emit(ringbuf, 0);
1702 intel_logical_ring_emit(ringbuf, 0);
1703 intel_logical_ring_emit(ringbuf, 0);
1704 intel_logical_ring_advance(ringbuf);
1709 static u32 gen8_get_seqno(struct intel_engine_cs *engine)
1711 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
1714 static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1716 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1719 static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
1722 * On BXT A steppings there is a HW coherency issue whereby the
1723 * MI_STORE_DATA_IMM storing the completed request's seqno
1724 * occasionally doesn't invalidate the CPU cache. Work around this by
1725 * clflushing the corresponding cacheline whenever the caller wants
1726 * the coherency to be guaranteed. Note that this cacheline is known
1727 * to be clean at this point, since we only write it in
1728 * bxt_a_set_seqno(), where we also do a clflush after the write. So
1729 * this clflush in practice becomes an invalidate operation.
1731 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
1734 static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1736 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1738 /* See bxt_a_seqno_barrier() explaining the reason for the clflush. */
1739 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
1743 * Reserve space for 2 NOOPs at the end of each request to be
1744 * used as a workaround for not being allowed to do lite
1745 * restore with HEAD==TAIL (WaIdleLiteRestore).
1747 #define WA_TAIL_DWORDS 2
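
/*
 * For reference: gen8_emit_request() below reserves 6 + WA_TAIL_DWORDS dwords
 * and intel_logical_ring_advance_and_submit() fills the spare two with
 * MI_NOOP after the breadcrumb, so a lite-restore resubmission never sees
 * HEAD == TAIL.
 */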
1749 static int gen8_emit_request(struct drm_i915_gem_request *request)
1751 struct intel_ringbuffer *ringbuf = request->ringbuf;
1754 ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
1758 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1759 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
1761 intel_logical_ring_emit(ringbuf,
1762 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1763 intel_logical_ring_emit(ringbuf,
1764 intel_hws_seqno_address(request->engine) |
1765 MI_FLUSH_DW_USE_GTT);
1766 intel_logical_ring_emit(ringbuf, 0);
1767 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
1768 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1769 intel_logical_ring_emit(ringbuf, MI_NOOP);
1770 return intel_logical_ring_advance_and_submit(request);
1773 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1775 struct intel_ringbuffer *ringbuf = request->ringbuf;
1778 ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
1782 /* We're using qword write, seqno should be aligned to 8 bytes. */
1783 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
1785 /* w/a: for post sync ops following a GPGPU operation we
1786 * need a prior CS_STALL, which is emitted by the flush
1787 * following the batch.
1789 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1790 intel_logical_ring_emit(ringbuf,
1791 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1792 PIPE_CONTROL_CS_STALL |
1793 PIPE_CONTROL_QW_WRITE));
1794 intel_logical_ring_emit(ringbuf,
1795 intel_hws_seqno_address(request->engine));
1796 intel_logical_ring_emit(ringbuf, 0);
1797 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
1798 /* We're thrashing one dword of HWS. */
1799 intel_logical_ring_emit(ringbuf, 0);
1800 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1801 intel_logical_ring_emit(ringbuf, MI_NOOP);
1802 return intel_logical_ring_advance_and_submit(request);
1805 static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
1807 struct render_state so;
1810 ret = i915_gem_render_state_prepare(req->engine, &so);
1814 if (so.rodata == NULL)
1817 ret = req->engine->emit_bb_start(req, so.ggtt_offset,
1818 I915_DISPATCH_SECURE);
1822 ret = req->engine->emit_bb_start(req,
1823 (so.ggtt_offset + so.aux_batch_offset),
1824 I915_DISPATCH_SECURE);
1828 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
1831 i915_gem_render_state_fini(&so);
1835 static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
1839 ret = intel_logical_ring_workarounds_emit(req);
1843 ret = intel_rcs_context_init_mocs(req);
1845 * Failing to program the MOCS is non-fatal. The system will not
1846 * run at peak performance, so generate an error and carry on.
1849 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
1851 return intel_lr_context_render_state_init(req);
1855 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
1857 * @engine: Engine Command Streamer.
1860 void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
1862 struct drm_i915_private *dev_priv;
1864 if (!intel_engine_initialized(engine))
1868 * Tasklet cannot be active at this point due to intel_mark_active/idle
1869 * so this is just for documentation.
1871 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
1872 tasklet_kill(&engine->irq_tasklet);
1874 dev_priv = engine->i915;
1876 if (engine->buffer) {
1877 intel_logical_ring_stop(engine);
1878 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
1881 if (engine->cleanup)
1882 engine->cleanup(engine);
1884 i915_cmd_parser_fini_ring(engine);
1885 i915_gem_batch_pool_fini(&engine->batch_pool);
1887 if (engine->status_page.obj) {
1888 i915_gem_object_unpin_map(engine->status_page.obj);
1889 engine->status_page.obj = NULL;
1891 intel_lr_context_unpin(dev_priv->kernel_context, engine);
1893 engine->idle_lite_restore_wa = 0;
1894 engine->disable_lite_restore_wa = false;
1895 engine->ctx_desc_template = 0;
1897 lrc_destroy_wa_ctx_obj(engine);
1898 engine->i915 = NULL;
static void
logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
	/* Default vfuncs which can be overridden by each engine. */
	engine->init_hw = gen8_init_common_ring;
	engine->emit_request = gen8_emit_request;
	engine->emit_flush = gen8_emit_flush;
	engine->irq_get = gen8_logical_ring_get_irq;
	engine->irq_put = gen8_logical_ring_put_irq;
	engine->emit_bb_start = gen8_emit_bb_start;
	engine->get_seqno = gen8_get_seqno;
	engine->set_seqno = gen8_set_seqno;
	if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
		engine->irq_seqno_barrier = bxt_a_seqno_barrier;
		engine->set_seqno = bxt_a_set_seqno;
	}
}
static inline void
logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
{
	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
	init_waitqueue_head(&engine->irq_queue);
}
static int
lrc_setup_hws(struct intel_engine_cs *engine,
	      struct drm_i915_gem_object *dctx_obj)
{
	void *hws;

	/* The HWSP is part of the default context object in LRC mode. */
	engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
				       LRC_PPHWSP_PN * PAGE_SIZE;
	hws = i915_gem_object_pin_map(dctx_obj);
	if (IS_ERR(hws))
		return PTR_ERR(hws);
	engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
	engine->status_page.obj = dctx_obj;

	return 0;
}
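
/*
 * Static per-engine information consumed by logical_ring_setup() below;
 * the array is indexed by enum intel_engine_id.
 */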
static const struct logical_ring_info {
	const char *name;
	unsigned exec_id;
	unsigned guc_id;
	u32 mmio_base;
	unsigned irq_shift;
} logical_rings[] = {
	[RCS] = {
		.name = "render ring",
		.exec_id = I915_EXEC_RENDER,
		.guc_id = GUC_RENDER_ENGINE,
		.mmio_base = RENDER_RING_BASE,
		.irq_shift = GEN8_RCS_IRQ_SHIFT,
	},
	[BCS] = {
		.name = "blitter ring",
		.exec_id = I915_EXEC_BLT,
		.guc_id = GUC_BLITTER_ENGINE,
		.mmio_base = BLT_RING_BASE,
		.irq_shift = GEN8_BCS_IRQ_SHIFT,
	},
	[VCS] = {
		.name = "bsd ring",
		.exec_id = I915_EXEC_BSD,
		.guc_id = GUC_VIDEO_ENGINE,
		.mmio_base = GEN6_BSD_RING_BASE,
		.irq_shift = GEN8_VCS1_IRQ_SHIFT,
	},
	[VCS2] = {
		.name = "bsd2 ring",
		.exec_id = I915_EXEC_BSD,
		.guc_id = GUC_VIDEO_ENGINE2,
		.mmio_base = GEN8_BSD2_RING_BASE,
		.irq_shift = GEN8_VCS2_IRQ_SHIFT,
	},
	[VECS] = {
		.name = "video enhancement ring",
		.exec_id = I915_EXEC_VEBOX,
		.guc_id = GUC_VIDEOENHANCE_ENGINE,
		.mmio_base = VEBOX_RING_BASE,
		.irq_shift = GEN8_VECS_IRQ_SHIFT,
	},
};
static struct intel_engine_cs *
logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
{
	const struct logical_ring_info *info = &logical_rings[id];
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine = &dev_priv->engine[id];
	enum forcewake_domains fw_domains;

	engine->id = id;
	engine->name = info->name;
	engine->exec_id = info->exec_id;
	engine->guc_id = info->guc_id;
	engine->mmio_base = info->mmio_base;

	engine->i915 = dev_priv;

	/* Intentionally left blank. */
	engine->buffer = NULL;
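
	/*
	 * Execlists submission writes ELSP and reads the context-status
	 * buffer registers; record the forcewake domains those registers
	 * need once, so the submission and interrupt paths can take them
	 * up front.
	 */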
	fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
						    RING_ELSP(engine),
						    FW_REG_WRITE);

	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     RING_CONTEXT_STATUS_PTR(engine),
						     FW_REG_READ | FW_REG_WRITE);

	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     RING_CONTEXT_STATUS_BUF_BASE(engine),
						     FW_REG_READ);

	engine->fw_domains = fw_domains;

	INIT_LIST_HEAD(&engine->active_list);
	INIT_LIST_HEAD(&engine->request_list);
	INIT_LIST_HEAD(&engine->buffers);
	INIT_LIST_HEAD(&engine->execlist_queue);
	spin_lock_init(&engine->execlist_lock);

	tasklet_init(&engine->irq_tasklet,
		     intel_lrc_irq_handler, (unsigned long)engine);

	logical_ring_init_platform_invariants(engine);
	logical_ring_default_vfuncs(engine);
	logical_ring_default_irqs(engine, info->irq_shift);

	intel_engine_init_hangcheck(engine);
	i915_gem_batch_pool_init(dev, &engine->batch_pool);

	return engine;
}
static int
logical_ring_init(struct intel_engine_cs *engine)
{
	struct i915_gem_context *dctx = engine->i915->kernel_context;
	int ret;

	ret = i915_cmd_parser_init_ring(engine);
	if (ret)
		goto error;

	ret = execlists_context_deferred_alloc(dctx, engine);
	if (ret)
		goto error;

	/* As this is the default context, always pin it */
	ret = intel_lr_context_pin(dctx, engine);
	if (ret) {
		DRM_ERROR("Failed to pin context for %s: %d\n",
			  engine->name, ret);
		goto error;
	}

	/* And setup the hardware status page. */
	ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
	if (ret) {
		DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
		goto error;
	}

	return 0;

error:
	intel_logical_ring_cleanup(engine);
	return ret;
}
static int logical_render_ring_init(struct drm_device *dev)
{
	struct intel_engine_cs *engine = logical_ring_setup(dev, RCS);
	int ret;

	if (HAS_L3_DPF(dev))
		engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	/* Override some vfuncs for the render ring. */
	if (INTEL_INFO(dev)->gen >= 9)
		engine->init_hw = gen9_init_render_ring;
	else
		engine->init_hw = gen8_init_render_ring;
	engine->init_context = gen8_init_rcs_context;
	engine->cleanup = intel_fini_pipe_control;
	engine->emit_flush = gen8_emit_flush_render;
	engine->emit_request = gen8_emit_request_render;

	ret = intel_init_pipe_control(engine);
	if (ret)
		return ret;

	ret = intel_init_workaround_bb(engine);
	if (ret) {
		/*
		 * We continue even if we fail to initialize the WA batch
		 * because we only expect rare glitches but nothing
		 * critical to prevent us from using the GPU.
		 */
		DRM_ERROR("WA batch buffer initialization failed: %d\n",
			  ret);
	}

	ret = logical_ring_init(engine);
	if (ret)
		lrc_destroy_wa_ctx_obj(engine);

	return ret;
}
static int logical_bsd_ring_init(struct drm_device *dev)
{
	struct intel_engine_cs *engine = logical_ring_setup(dev, VCS);

	return logical_ring_init(engine);
}

static int logical_bsd2_ring_init(struct drm_device *dev)
{
	struct intel_engine_cs *engine = logical_ring_setup(dev, VCS2);

	return logical_ring_init(engine);
}

static int logical_blt_ring_init(struct drm_device *dev)
{
	struct intel_engine_cs *engine = logical_ring_setup(dev, BCS);

	return logical_ring_init(engine);
}

static int logical_vebox_ring_init(struct drm_device *dev)
{
	struct intel_engine_cs *engine = logical_ring_setup(dev, VECS);

	return logical_ring_init(engine);
}
/**
 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
 * @dev: DRM device.
 *
 * This function inits the engines for Execlists submission style (the equivalent in the
 * legacy ringbuffer submission world would be i915_gem_init_engines). It does it only for
 * those engines that are present in the hardware.
 *
 * Return: non-zero if the initialization failed.
 */
int intel_logical_rings_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = logical_render_ring_init(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = logical_bsd_ring_init(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = logical_blt_ring_init(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = logical_vebox_ring_init(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = logical_bsd2_ring_init(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	return 0;

cleanup_vebox_ring:
	intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
cleanup_blt_ring:
	intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
cleanup_bsd_ring:
	intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
cleanup_render_ring:
	intel_logical_ring_cleanup(&dev_priv->engine[RCS]);

	return ret;
}
static u32
make_rpcs(struct drm_i915_private *dev_priv)
{
	u32 rpcs = 0;

	/*
	 * No explicit RPCS request is needed to ensure full
	 * slice/subslice/EU enablement prior to Gen9.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	/*
	 * Starting in Gen9, render power gating can leave
	 * slice/subslice/EU in a partially enabled state. We
	 * must make an explicit request through RPCS for full
	 * enablement.
	 */
	if (INTEL_INFO(dev_priv)->has_slice_pg) {
		rpcs |= GEN8_RPCS_S_CNT_ENABLE;
		rpcs |= INTEL_INFO(dev_priv)->slice_total <<
			GEN8_RPCS_S_CNT_SHIFT;
		rpcs |= GEN8_RPCS_ENABLE;
	}

	if (INTEL_INFO(dev_priv)->has_subslice_pg) {
		rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
		rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
			GEN8_RPCS_SS_CNT_SHIFT;
		rpcs |= GEN8_RPCS_ENABLE;
	}

	if (INTEL_INFO(dev_priv)->has_eu_pg) {
		rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
			GEN8_RPCS_EU_MIN_SHIFT;
		rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
			GEN8_RPCS_EU_MAX_SHIFT;
		rpcs |= GEN8_RPCS_ENABLE;
	}

	return rpcs;
}
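
/*
 * The RPCS value computed by make_rpcs() is loaded into GEN8_R_PWR_CLK_STATE
 * through the render engine's context image in populate_lr_context() below.
 */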
static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
{
	u32 indirect_ctx_offset;

	switch (INTEL_GEN(engine->i915)) {
	default:
		MISSING_CASE(INTEL_GEN(engine->i915));
		/* fall through */
	case 9:
		indirect_ctx_offset =
			GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	case 8:
		indirect_ctx_offset =
			GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
		break;
	}

	return indirect_ctx_offset;
}
static int
populate_lr_context(struct i915_gem_context *ctx,
		    struct drm_i915_gem_object *ctx_obj,
		    struct intel_engine_cs *engine,
		    struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_private *dev_priv = ctx->i915;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	void *vaddr;
	u32 *reg_state;
	int ret;

	if (!ppgtt)
		ppgtt = dev_priv->mm.aliasing_ppgtt;

	ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
	if (ret) {
		DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
		return ret;
	}

	vaddr = i915_gem_object_pin_map(ctx_obj);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
		return ret;
	}
	ctx_obj->dirty = true;
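	/* The register state is written through this CPU mapping below; mark
	 * the backing pages dirty so those writes are not discarded if the
	 * object is later unbound or swapped out. */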
	/* The second page of the context object contains some fields which must
	 * be set up prior to the first execution. */
	reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;

	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
	 * commands followed by (reg, value) pairs. The values we are setting here are
	 * only for the first context restore: on a subsequent save, the GPU will
	 * recreate this batchbuffer with new values (including all the missing
	 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
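	/* The render engine carries three extra (reg, value) pairs in this
	 * first LRI block (BB_PER_CTX_PTR, INDIRECT_CTX and
	 * INDIRECT_CTX_OFFSET), hence 14 registers instead of 11. */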
	reg_state[CTX_LRI_HEADER_0] =
		MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
	ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
		       RING_CONTEXT_CONTROL(engine),
		       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
					  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
					  (HAS_RESOURCE_STREAMER(dev_priv) ?
					   CTX_CTRL_RS_CTX_ENABLE : 0)));
	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
		       0);
	/* Ring buffer start address is not known until the buffer is pinned.
	 * It is written to the context image in execlists_update_context()
	 */
	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
		       RING_START(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
		       RING_CTL(engine->mmio_base),
		       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
		       RING_BBADDR_UDW(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
		       RING_BBADDR(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
		       RING_BBSTATE(engine->mmio_base),
		       RING_BB_PPGTT);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
		       RING_SBBADDR_UDW(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
		       RING_SBBADDR(engine->mmio_base), 0);
	ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
		       RING_SBBSTATE(engine->mmio_base), 0);
	if (engine->id == RCS) {
		ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
			       RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
			       RING_INDIRECT_CTX(engine->mmio_base), 0);
		ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
			       RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
		if (engine->wa_ctx.obj) {
			struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
			uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);

			reg_state[CTX_RCS_INDIRECT_CTX+1] =
				(ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
				(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);

			reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
				intel_lr_indirect_ctx_offset(engine) << 6;

			reg_state[CTX_BB_PER_CTX_PTR+1] =
				(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
				0x01;
		}
	}
	reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
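	/* This second LRI block loads the context timestamp plus the four
	 * PDP entries (upper/lower dword each), i.e. nine registers. */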
	ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
		       RING_CTX_TIMESTAMP(engine->mmio_base), 0);
	/* PDP values will be assigned later if needed */
	ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
		       0);
	ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
		       0);

	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
		/* 64b PPGTT (48bit canonical)
		 * PDP0_DESCRIPTOR contains the base address to PML4 and
		 * other PDP Descriptors are ignored.
		 */
		ASSIGN_CTX_PML4(ppgtt, reg_state);
	} else {
		/* 32b PPGTT
		 * PDP*_DESCRIPTOR contains the base address of space supported.
		 * With dynamic page allocation, PDPs may not be allocated at
		 * this point. Point the unallocated PDPs to the scratch page.
		 */
		execlists_update_context_pdps(ppgtt, reg_state);
	}

	if (engine->id == RCS) {
		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
		ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
			       make_rpcs(dev_priv));
	}

	i915_gem_object_unpin_map(ctx_obj);

	return 0;
}
/**
 * intel_lr_context_size() - return the size of the context for an engine
 * @engine: which engine to find the context size for
 *
 * Each engine may require a different amount of space for a context image,
 * so when allocating (or copying) an image, this function can be used to
 * find the right size for the specific engine.
 *
 * Return: size (in bytes) of an engine-specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
{
	int ret = 0;

	WARN_ON(INTEL_GEN(engine->i915) < 8);

	switch (engine->id) {
	case RCS:
		if (INTEL_GEN(engine->i915) >= 9)
			ret = GEN9_LR_CONTEXT_RENDER_SIZE;
		else
			ret = GEN8_LR_CONTEXT_RENDER_SIZE;
		break;
	default:
		ret = GEN8_LR_CONTEXT_OTHER_SIZE;
		break;
	}

	return ret;
}
/**
 * execlists_context_deferred_alloc() - create the LRC specific bits of a context
 * @ctx: LR context to create.
 * @engine: engine to be used with the context.
 *
 * This function can be called more than once, with different engines, if we plan
 * to use the context with them. The context backing objects and the ringbuffers
 * (especially the ringbuffer backing objects) suck a lot of memory up, and that's why
 * the creation is a deferred call: it's better to make sure first that we need to use
 * a given ring with the context.
 *
 * Return: non-zero on error.
 */
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
					    struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *ctx_obj;
	struct intel_context *ce = &ctx->engine[engine->id];
	uint32_t context_size;
	struct intel_ringbuffer *ringbuf;
	int ret;

	context_size = round_up(intel_lr_context_size(engine), 4096);

	/* One extra page as the sharing data between driver and GuC */
	context_size += PAGE_SIZE * LRC_PPHWSP_PN;

	ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size);
	if (IS_ERR(ctx_obj)) {
		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
		return PTR_ERR(ctx_obj);
	}

	ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
	if (IS_ERR(ringbuf)) {
		ret = PTR_ERR(ringbuf);
		goto error_deref_obj;
	}

	ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
		goto error_ringbuf;
	}
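
	/*
	 * Contexts for engines with an init_context hook (only the render
	 * engine in this file) are left marked uninitialised below, so that
	 * the first request on them still emits the golden render state and
	 * per-context workarounds.
	 */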
	ce->ringbuf = ringbuf;
	ce->state = ctx_obj;
	ce->initialised = engine->init_context == NULL;

	return 0;

error_ringbuf:
	intel_ringbuffer_free(ringbuf);
error_deref_obj:
	drm_gem_object_unreference(&ctx_obj->base);
	ce->ringbuf = NULL;
	ce->state = NULL;
	return ret;
}
void intel_lr_context_reset(struct drm_i915_private *dev_priv,
			    struct i915_gem_context *ctx)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv) {
		struct intel_context *ce = &ctx->engine[engine->id];
		struct drm_i915_gem_object *ctx_obj = ce->state;
		void *vaddr;
		uint32_t *reg_state;

		if (!ctx_obj)
			continue;

		vaddr = i915_gem_object_pin_map(ctx_obj);
		if (WARN_ON(IS_ERR(vaddr)))
			continue;

		reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
		ctx_obj->dirty = true;
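
		/* In the LRI list each register offset is followed by its
		 * value, hence the +1 when poking HEAD and TAIL directly in
		 * the saved context image. */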
		reg_state[CTX_RING_HEAD+1] = 0;
		reg_state[CTX_RING_TAIL+1] = 0;

		i915_gem_object_unpin_map(ctx_obj);

		ce->ringbuf->head = 0;
		ce->ringbuf->tail = 0;
	}
}