#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"

#define I915_CMD_HASH_ORDER 9
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some inclination as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};
#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
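/*
 * Worked example (illustrative): with I915_NUM_ENGINES == 5 and 8-byte
 * slots, the signal offset from RCS (id 0) to VCS (id 1) is
 * GEN8_SEMAPHORE_OFFSET(0, 1) = (0 * 5 + 1) * 8 = 0x08; each engine owns
 * a contiguous run of five qword slots, indexed by the other engine.
 */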
enum intel_engine_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31
#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
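/*
 * Usage sketch (illustrative): walking the sampler values collected in a
 * struct intel_instdone; only slice/subslice pairs present in the
 * device's sseu masks are visited.
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		pr_info("sampler[%d][%d] = 0x%08x\n", slice, subslice,
 *			instdone->sampler[slice][subslice]);
 */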
struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	int score;
	enum intel_engine_hangcheck_action action;
	int deadlock;
	struct intel_instdone instdone;
};
struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct intel_engine_cs *engine;

	struct list_head request_list;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};
struct i915_gem_context;
struct drm_i915_reg_table;
/*
 * We use a single page to load ctx workarounds, so all of these
 * values are referred to in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};
struct drm_i915_gem_request;

struct intel_engine_cs {
	struct drm_i915_private *i915;
	const char *name;
	enum intel_engine_id {
		RCS = 0,
		BCS,
		VCS,
		VCS2,	/* Keep instances of the same type engine together. */
		VECS
	} id;
#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
	unsigned int exec_id;
	enum intel_engine_hw_id {
		RCS_HW = 0,
		VCS_HW,
		BCS_HW,
		VECS_HW,
		VCS2_HW
	} hw_id;
	enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */

	u32 mmio_base;
	unsigned int irq_shift;
	struct intel_ring *buffer;
	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
		bool irq_posted;

		spinlock_t lock; /* protects the lists of requests */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct intel_wait *first_wait; /* oldest waiter by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned long timeout;

		bool irq_enabled : 1;
		bool rpm_wakelock : 1;
	} breadcrumbs;
	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;
	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);
	void (*reset_hw)(struct intel_engine_cs *engine,
			 struct drm_i915_gem_request *req);

	int (*init_context)(struct drm_i915_gem_request *req);
	int (*emit_flush)(struct drm_i915_gem_request *request,
			  u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
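	/*
	 * For example (illustrative): a caller that needs both the write
	 * flush and the TLB/cache invalidation in one go would use
	 *
	 *	ret = engine->emit_flush(req, EMIT_BARRIER);
	 *
	 * while invalidation alone before a dispatch only needs
	 * EMIT_INVALIDATE.
	 */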
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	int (*emit_request)(struct drm_i915_gem_request *req);

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct drm_i915_gem_request *req);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);
	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	  sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
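	/*
	 * Worked check of the two formulas above (illustrative): with
	 * seqno_size == 8 and NUM_RINGS == 5,
	 * f(RCS, VCS2) = (0 * 5 * 8) + (8 * 4) = 0x20 matches the
	 * "signal to" table, while g(RCS, VCS2) = (4 * 5 * 8) + (8 * 0) =
	 * 0xa0 matches the "sync from" table, confirming that one table
	 * is the transpose of the other.
	 */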
	struct {
		u32 sync_seqno[I915_NUM_ENGINES-1];

		union {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32		wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t	signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64		signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int	(*sync_to)(struct drm_i915_gem_request *req,
				   struct drm_i915_gem_request *signal);
		int	(*signal)(struct drm_i915_gem_request *req);
	} semaphore;
	/* Execlists */
	struct tasklet_struct irq_tasklet;
	spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
	struct execlist_port {
		struct drm_i915_gem_request *request;
		unsigned int count;
	} execlist_port[2];
	struct list_head execlist_queue;
	unsigned int fw_domains;
	bool disable_lite_restore_wa;
	u32 ctx_desc_template;
	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Seqno of request most recently submitted to request_list.
	 * Used exclusively by hang checker to avoid grabbing lock while
	 * inspecting request list.
	 */
	u32 last_submitted_seqno;
	u32 last_pending_seqno;
	/* An RCU guarded pointer to the last request. No reference is
	 * held to the request, users must carefully acquire a reference to
	 * the request using i915_gem_active_get_rcu(), or hold the
	 * struct_mutex.
	 */
	struct i915_gem_active last_request;

	struct i915_gem_context *last_context;

	struct intel_engine_hangcheck hangcheck;
	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
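/*
 * For example (illustrative): MI commands encode their length in the low
 * bits of the header, so a render-engine implementation would typically
 * return a mask like 0x3f for an MI client header, and 0 for a client id
 * it does not recognise.
 */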
static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return 1 << engine->id;
}
static inline u32
intel_engine_sync_index(struct intel_engine_cs *engine,
			struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
	 */

	idx = (other->id - engine->id) - 1;
	if (idx < 0)
		idx += I915_NUM_ENGINES;

	return idx;
}
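/*
 * Worked example (illustrative): for engine->id == 2 and other->id == 0,
 * idx = (0 - 2) - 1 = -3, which the wrap turns into -3 + 5 = 2; the
 * mapping skips the engine itself and wraps around the engine list.
 */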
static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
	mb();
	clflush(&engine->status_page.page_addr[reg]);
	mb();
}
static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}
static inline void
intel_write_status_page(struct intel_engine_cs *engine,
			int reg, u32 value)
{
	engine->status_page.page_addr[reg] = value;
}
/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
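/*
 * Illustrative arithmetic, assuming MI_STORE_DWORD_INDEX_SHIFT == 2
 * (the index selects a dword, so byte offset = index * 4): the seqno
 * dword then sits at byte 0x30 << 2 = 0xc0 of the status page and the
 * scratch dword at 0x40 << 2 = 0x100.
 */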
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
{
	*(uint32_t *)(ring->vaddr + ring->tail) = data;
	ring->tail += 4;
}

static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
{
	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}
static inline void intel_ring_advance(struct intel_ring *ring)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
}
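/*
 * Typical emission pattern (illustrative sketch; real callers pick their
 * own commands and dword count):
 *
 *	struct intel_ring *ring = req->ring;
 *	int ret;
 *
 *	ret = intel_ring_begin(req, 2);
 *	if (ret)
 *		return ret;
 *
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */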
static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	return value & (ring->size - 1);
}
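/*
 * Worked example (illustrative): for a 4096-byte ring the mask is 0xfff,
 * so a tail that has just reached the end of the ring (4096) is stored
 * as 0, keeping HEAD/TAIL values inside [0, size).
 */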
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ring *ring);

void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
static inline int intel_engine_idle(struct intel_engine_cs *engine,
				    unsigned int flags)
{
	/* Wait upon the last request to be completed */
	return i915_gem_active_wait_unlocked(&engine->last_request, flags);
}
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}
int init_workarounds_ring(struct intel_engine_cs *engine);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);
/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336
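/*
 * Arithmetic check of the comment above: 6 + 6 + 72 = 84 dwords, and
 * 84 * sizeof(u32) = 336 bytes, the value of MIN_SPACE_FOR_ADD_REQUEST.
 */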
static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}
/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}
static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}
bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
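/*
 * Waiter flow sketch (illustrative only; the real wait loop also handles
 * timeouts and signals):
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, seqno);
 *	intel_engine_add_wait(engine, &wait);
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	intel_engine_remove_wait(engine, &wait);
 */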
static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
}
static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
{
	bool wakeup = false;

	/* Note that for this not to dangerously chase a dangling pointer,
	 * we must hold the rcu_read_lock here.
	 *
	 * Also note that tsk is likely to be in !TASK_RUNNING state so an
	 * early test for tsk->state != TASK_RUNNING before wake_up_process()
	 * is unlikely to be beneficial.
	 */
	if (intel_engine_has_waiter(engine)) {
		struct task_struct *tsk;

		rcu_read_lock();
		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
		if (tsk)
			wakeup = wake_up_process(tsk);
		rcu_read_unlock();
	}

	return wakeup;
}
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_kick_waiters(struct drm_i915_private *i915);
unsigned int intel_kick_signalers(struct drm_i915_private *i915);
static inline bool intel_engine_is_active(struct intel_engine_cs *engine)
{
	return i915_gem_active_isset(&engine->last_request);
}

#endif /* _INTEL_RINGBUFFER_H_ */