#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"

#define I915_CMD_HASH_ORDER 9
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to where the magic values used in the various
 * workarounds come from.
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct		drm_i915_gem_object *obj;
};
#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qword-aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to)			     \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to)			     \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from)			     \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
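/*
 * Worked example (illustrative only): with I915_NUM_ENGINES == 5 and
 * gen8_semaphore_seqno_size == sizeof(uint64_t) == 8,
 * GEN8_SEMAPHORE_OFFSET(1, 2) evaluates to (1 * 5 + 2) * 8 == 0x38; each
 * (from, to) engine pair thus owns its own qword-aligned slot within the
 * semaphore page, matching the signal/wait tables further below.
 */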
enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31
struct intel_ring_hangcheck {
	u64 acthd;
	unsigned long user_interrupts;
	u32 seqno;
	int score;
	enum intel_ring_hangcheck_action action;
	int deadlock;
	u32 instdone[I915_NUM_INSTDONE_REG];
};
struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;
	void __iomem *virtual_start;

	struct intel_engine_cs *engine;
	struct list_head link;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};
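/*
 * Sketch of how last_retired_head is consumed (this mirrors what
 * intel_ring_update_space() is expected to do; illustrative only):
 *
 *	if (ringbuf->last_retired_head != -1) {
 *		ringbuf->head = ringbuf->last_retired_head;
 *		ringbuf->last_retired_head = -1;
 *	}
 */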
struct i915_gem_context;
struct drm_i915_reg_table;
/*
 * we use a single page to load ctx workarounds so all of these
 * values are expressed in dwords
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies batch starting position, also useful if we want
 *    to have multiple batches at different offsets based on some criteria.
 *    It is not a requirement at the moment but provides an option for
 *    future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct drm_i915_gem_object *obj;
};
struct drm_i915_gem_request;

struct intel_engine_cs {
	struct drm_i915_private *i915;
	const char *name;
	enum intel_engine_id {
		RCS = 0,
		BCS,
		VCS,
		VCS2,	/* Keep instances of the same type engine together. */
		VECS
	} id;
#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
	unsigned int exec_id;
	unsigned int hw_id;
	unsigned int guc_id; /* XXX same as hw_id? */
	u32 mmio_base;
	unsigned int irq_shift;
	struct intel_ringbuffer *buffer;
	struct list_head buffers;
	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		struct task_struct *irq_seqno_bh; /* bh for user interrupts */
		unsigned long irq_wakeups;
		bool irq_posted;

		spinlock_t lock; /* protects the lists of requests */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct intel_wait *first_wait; /* oldest waiter by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */

		bool irq_enabled : 1;
		bool rpm_wakelock : 1;
	} breadcrumbs;
	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	u32		irq_keep_mask; /* always keep these interrupts */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	void		(*irq_enable)(struct intel_engine_cs *ring);
	void		(*irq_disable)(struct intel_engine_cs *ring);

	int		(*init_hw)(struct intel_engine_cs *ring);

	int		(*init_context)(struct drm_i915_gem_request *req);

	void		(*write_tail)(struct intel_engine_cs *ring,
				      u32 value);
	int __must_check (*flush)(struct drm_i915_gem_request *req,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct drm_i915_gem_request *req);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void		(*irq_seqno_barrier)(struct intel_engine_cs *ring);
	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
					       u64 offset, u32 length,
					       unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
#define I915_DISPATCH_RS     0x4
	void		(*cleanup)(struct intel_engine_cs *ring);
	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		  VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		  VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		u32	sync_seqno[I915_NUM_ENGINES-1];

		union {
			struct {
				/* our mbox written by others */
				u32		wait[I915_NUM_ENGINES];
				/* mboxes this ring signals to */
				i915_reg_t	signal[I915_NUM_ENGINES];
			} mbox;
			u64		signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int	(*sync_to)(struct drm_i915_gem_request *to_req,
				   struct intel_engine_cs *from,
				   u32 seqno);
		int	(*signal)(struct drm_i915_gem_request *signaller_req,
				  /* num_dwords needed by caller */
				  unsigned int num_dwords);
	} semaphore;
	struct tasklet_struct irq_tasklet;
	spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
	struct list_head execlist_queue;
	unsigned int fw_domains;
	unsigned int next_context_status_buffer;
	unsigned int idle_lite_restore_wa;
	bool disable_lite_restore_wa;
	u32 ctx_desc_template;
	int		(*emit_request)(struct drm_i915_gem_request *request);
	int		(*emit_flush)(struct drm_i915_gem_request *request,
				      u32 invalidate_domains,
				      u32 flush_domains);
	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
					 u64 offset, unsigned dispatch_flags);
	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;
	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;
	/**
	 * Seqno of request most recently submitted to request_list.
	 * Used exclusively by hang checker to avoid grabbing lock while
	 * inspecting request list.
	 */
	u32 last_submitted_seqno;

	bool gpu_caches_dirty;

	struct i915_gem_context *last_context;

	struct intel_ring_hangcheck hangcheck;
	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;
	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
static inline bool
intel_engine_initialized(const struct intel_engine_cs *engine)
{
	return engine->i915 != NULL;
}
static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return 1 << engine->id;
}
static inline u32
intel_ring_sync_index(struct intel_engine_cs *engine,
		      struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
	 */

	idx = (other - engine) - 1;
	if (idx < 0)
		idx += I915_NUM_ENGINES;

	return idx;
}
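/*
 * Usage sketch (illustrative): the returned index selects the slot in
 * engine->semaphore.sync_seqno[] that records the last seqno seen from
 * @other, e.g.
 *
 *	u32 seen = engine->semaphore.sync_seqno[intel_ring_sync_index(engine, other)];
 */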
static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
	mb();
	clflush(&engine->status_page.page_addr[reg]);
	mb();
}
static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}
static inline void
intel_write_status_page(struct intel_engine_cs *engine,
			int reg, u32 value)
{
	engine->status_page.page_addr[reg] = value;
}
/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_FLUSH.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
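/*
 * Worked example (assuming MI_STORE_DWORD_INDEX_SHIFT == 2): dword index
 * 0x40 becomes byte offset 0x40 << 2 == 0x100 within the status page,
 * which is what I915_GEM_HWS_SCRATCH_ADDR evaluates to.
 */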
struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
				     struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);

void intel_stop_engine(struct intel_engine_cs *engine);
void intel_cleanup_engine(struct intel_engine_cs *engine);
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_engine_cs *engine,
				   u32 data)
{
	struct intel_ringbuffer *ringbuf = engine->buffer;
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}
static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
				       i915_reg_t reg)
{
	intel_ring_emit(engine, i915_mmio_reg_offset(reg));
}
static inline void intel_ring_advance(struct intel_engine_cs *engine)
{
	struct intel_ringbuffer *ringbuf = engine->buffer;
	ringbuf->tail &= ringbuf->size - 1;
}
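/*
 * Typical emission sequence (illustrative sketch; error handling beyond
 * the mandatory intel_ring_begin() check is elided):
 *
 *	ret = intel_ring_begin(req, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(req->engine, MI_NOOP);
 *	intel_ring_emit(req->engine, MI_NOOP);
 *	intel_ring_advance(req->engine);
 */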
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
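/*
 * Conceptually (a sketch, not necessarily the exact implementation in
 * intel_ringbuffer.c): free space is the distance from tail to head,
 * wrapped modulo size, minus the I915_RING_FREE_SPACE guard that keeps
 * head and tail off the same cacheline:
 *
 *	space = head - tail;
 *	if (space <= 0)
 *		space += size;
 *	return space - I915_RING_FREE_SPACE;
 */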
int __must_check intel_engine_idle(struct intel_engine_cs *engine);
void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
void intel_fini_pipe_control(struct intel_engine_cs *engine);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
u64 intel_ring_get_active_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}
int init_workarounds_ring(struct intel_engine_cs *engine);
static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
	return ringbuf->tail;
}
/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336
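/*
 * Sanity check on the arithmetic above: (6 + 6 + 72) dwords == 84 dwords,
 * and 84 * sizeof(uint32_t) == 336 bytes == MIN_SPACE_FOR_ADD_REQUEST.
 */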
static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
}
/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
struct intel_wait {
	struct rb_node node;
	struct task_struct *tsk;
	u32 seqno;
};

struct intel_signal_node {
	struct rb_node node;
	struct intel_wait wait;
};
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}
static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}
bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
}
static inline bool intel_engine_wakeup(struct intel_engine_cs *engine)
{
	bool wakeup = false;
	struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
	/* Note that for this not to dangerously chase a dangling pointer,
	 * the caller is responsible for ensuring that the task remains valid for
	 * wake_up_process() i.e. that the RCU grace period cannot expire.
	 *
	 * Also note that tsk is likely to be in !TASK_RUNNING state so an
	 * early test for tsk->state != TASK_RUNNING before wake_up_process()
	 * is unlikely to be beneficial.
	 */
	if (tsk)
		wakeup = wake_up_process(tsk);
	return wakeup;
}
void intel_engine_enable_fake_irq(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_kick_waiters(struct drm_i915_private *i915);
unsigned int intel_kick_signalers(struct drm_i915_private *i915);

#endif /* _INTEL_RINGBUFFER_H_ */