#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
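
/*
 * The reservation above exists to honour the constraint quoted from the
 * BSpec: the tail is never allowed to catch up to the head. A minimal
 * sketch of the resulting free-space computation, mirroring ring_space()
 * in intel_ringbuffer.c (HEAD_ADDR, from i915_reg.h, masks off the head
 * pointer's status bits):
 *
 *	static int ring_space(struct intel_ring_buffer *ring)
 *	{
 *		int space = (ring->head & HEAD_ADDR) -
 *			    (ring->tail + I915_RING_FREE_SPACE);
 *		if (space < 0)
 *			space += ring->size;
 *		return space;
 *	}
 */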

struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct		drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
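
/*
 * These wrappers resolve a ring's register bank from its mmio_base. A
 * minimal sketch of how they are used, mirroring ring_write_tail() in
 * intel_ringbuffer.c (I915_WRITE expects a dev_priv in scope, hence
 * the local):
 *
 *	static void ring_write_tail(struct intel_ring_buffer *ring,
 *				    u32 value)
 *	{
 *		struct drm_i915_private *dev_priv = ring->dev->dev_private;
 *		I915_WRITE_TAIL(ring, value);
 *	}
 */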

enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
	u64 acthd;
	u32 seqno;
	int score;
	enum intel_ring_hangcheck_action action;
	bool deadlock;
};

struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
		VCS2
	} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
	u32		mmio_base;
	void		__iomem *virtual_start;
	struct		drm_device *dev;
	struct		drm_i915_gem_object *obj;

	u32		head;
	u32		tail;
	int		space;
	int		size;
	int		effective_size;
	struct intel_hw_status_page status_page;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32		last_retired_head;
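
	/*
	 * A minimal sketch of how last_retired_head is consumed when
	 * waiting for ring space, mirroring intel_ring_wait_request()
	 * in intel_ringbuffer.c (ring_space() as sketched above):
	 *
	 *	if (ring->last_retired_head != -1) {
	 *		ring->head = ring->last_retired_head;
	 *		ring->last_retired_head = -1;
	 *		ring->space = ring_space(ring);
	 *	}
	 */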

	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32	invalidate_domains,
				  u32	flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32		(*get_seqno)(struct intel_ring_buffer *ring,
				     bool lazy_coherency);
	void		(*set_seqno)(struct intel_ring_buffer *ring,
				     u32 seqno);
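
	/*
	 * A minimal sketch of a get_seqno implementation with the
	 * coherency kick, mirroring gen6_ring_get_seqno() in
	 * intel_ringbuffer.c: a posting read of a command streamer
	 * register (ACTHD here) forces the seqno write to land before
	 * the status page is sampled.
	 *
	 *	static u32 gen6_ring_get_seqno(struct intel_ring_buffer *ring,
	 *				       bool lazy_coherency)
	 *	{
	 *		if (!lazy_coherency) {
	 *			struct drm_i915_private *dev_priv =
	 *				ring->dev->dev_private;
	 *			POSTING_READ(RING_ACTHD(ring->mmio_base));
	 *		}
	 *		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
	 *	}
	 */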
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u32 offset, u32 length,
					       unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void		(*cleanup)(struct intel_ring_buffer *ring);

	struct {
		u32	sync_seqno[I915_NUM_RINGS-1];

		struct {
			/* our mbox written by others */
			u32		wait[I915_NUM_RINGS];
			/* mboxes this ring signals to */
			u32		signal[I915_NUM_RINGS];
		} mbox;

		int	(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);
		int	(*signal)(struct intel_ring_buffer *signaller,
				  /* num_dwords needed by caller */
				  unsigned int num_dwords);
	} semaphore;
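
	/*
	 * A minimal sketch of inter-ring synchronisation through these
	 * hooks, loosely following i915_gem_object_sync() in i915_gem.c
	 * (assuming the semaphore grouping reconstructed above): make
	 * @to wait until @from has passed @seqno.
	 *
	 *	idx = intel_ring_sync_index(from, to);
	 *	ret = to->semaphore.sync_to(to, from, seqno);
	 *	if (ret == 0)
	 *		from->semaphore.sync_seqno[idx] = seqno;
	 */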

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	struct drm_i915_gem_request *preallocated_lazy_request;
	u32 outstanding_lazy_seqno;
	bool gpu_caches_dirty;

	wait_queue_head_t irq_queue;

	struct i915_hw_context *default_context;
	struct i915_hw_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	/*
	 * Tables of commands the command parser needs to know about
	 * for this ring.
	 */
	const struct drm_i915_cmd_table *cmd_tables;
	int cmd_table_count;

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const u32 *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers, but
	 * only from the DRM master.
	 */
	const u32 *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
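
/*
 * A minimal sketch of a get_cmd_length_mask() callback, mirroring
 * gen7_render_get_cmd_length_mask() in i915_cmd_parser.c (the INSTR_*
 * client/subclient fields come from i915_reg.h):
 *
 *	static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
 *	{
 *		u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
 *		u32 subclient =
 *			(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
 *
 *		if (client == INSTR_MI_CLIENT)
 *			return 0x3F;
 *		else if (client == INSTR_RC_CLIENT) {
 *			if (subclient == INSTR_MEDIA_SUBCLIENT)
 *				return 0xFFFF;
 *			else
 *				return 0xFF;
 *		}
 *
 *		DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n",
 *				 cmd_header);
 *		return 0;
 *	}
 */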

static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

static inline int
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * cs -> 0 = vcs, 1 = bcs
	 * vcs -> 0 = bcs, 1 = cs,
	 * bcs -> 0 = cs, 1 = vcs.
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
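
/*
 * Worked example against the ring array in drm_i915_private: for
 * ring = &dev_priv->ring[VCS] and other = &dev_priv->ring[BCS],
 * idx = (BCS - VCS) - 1 = 0, matching "vcs -> 0 = bcs" above.
 */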

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_ring_buffer *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}
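
/*
 * A minimal sketch of how the write helper is used to seed the
 * breadcrumb slot, mirroring ring_set_seqno() in intel_ringbuffer.c
 * (I915_GEM_HWS_INDEX is defined below):
 *
 *	static void ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
 *	{
 *		intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
 *	}
 */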

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
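
/*
 * A minimal usage sketch: the breadcrumb that add_request emits lands
 * in dword I915_GEM_HWS_INDEX and is read back with the helper above,
 *
 *	u32 seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 *
 * while I915_GEM_HWS_SCRATCH_ADDR is the byte offset an
 * MI_STORE_DWORD_INDEX command uses to target the scratch slot.
 */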

void intel_stop_ring_buffer(struct intel_ring_buffer *ring);
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}
static inline void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
}
void __intel_ring_advance(struct intel_ring_buffer *ring);
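
/*
 * A minimal sketch of the canonical emit pattern: reserve a number of
 * dwords, write exactly that many, then advance (MI_NOOP comes from
 * i915_reg.h):
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 *
 * intel_ring_advance() only wraps the software tail;
 * __intel_ring_advance() additionally writes the new tail to the
 * hardware through the ring's write_tail hook.
 */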

int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
	BUG_ON(ring->outstanding_lazy_seqno == 0);
	return ring->outstanding_lazy_seqno;
}

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */