karo-tx-linux.git: drivers/gpu/drm/i915/intel_ringbuffer.c (blob 7ca1b946d8de1463d8c8d3fb20b5a6876e401bad)
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Hai hao <haihao.xiang@intel.com>
27  *
28  */
29
30 #include <drm/drmP.h>
31 #include "i915_drv.h"
32 #include <drm/i915_drm.h>
33 #include "i915_trace.h"
34 #include "intel_drv.h"
35
36 /* Early gen2 devices have a cacheline of just 32 bytes, so using 64 here is
37  * overkill, but it keeps the logic simple. Indeed, the whole purpose of this
38  * macro is just to give some indication of where the magic values used in
39  * the various workarounds come from!
40  */
41 #define CACHELINE_BYTES 64
42
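/*
 * Free space in the circular ring runs from tail (where the CPU writes)
 * up to head (where the GPU reads), less I915_RING_FREE_SPACE so the
 * tail can never fully catch up with the head.  Worked example, assuming
 * a 4096-byte ring and an I915_RING_FREE_SPACE of 64 purely for
 * illustration: head = 512, tail = 3584 gives 512 - (3584 + 64) = -3136,
 * which wraps to -3136 + 4096 = 960 bytes of usable space.
 */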
43 static inline int __ring_space(int head, int tail, int size)
44 {
45         int space = head - (tail + I915_RING_FREE_SPACE);
46         if (space < 0)
47                 space += size;
48         return space;
49 }
50
51 static inline int ring_space(struct intel_engine_cs *ring)
52 {
53         struct intel_ringbuffer *ringbuf = ring->buffer;
54         return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
55 }
56
57 static bool intel_ring_stopped(struct intel_engine_cs *ring)
58 {
59         struct drm_i915_private *dev_priv = ring->dev->dev_private;
60         return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
61 }
62
63 void __intel_ring_advance(struct intel_engine_cs *ring)
64 {
65         struct intel_ringbuffer *ringbuf = ring->buffer;
66         ringbuf->tail &= ringbuf->size - 1;
67         if (intel_ring_stopped(ring))
68                 return;
69         ring->write_tail(ring, ringbuf->tail);
70 }
71
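/*
 * The per-generation flush hooks below translate GEM invalidate/flush
 * domain masks into ring commands: an MI_FLUSH with the appropriate flag
 * bits on gen2-gen5, and a PIPE_CONTROL with an assembled set of
 * flush/invalidate flags on gen6+.
 */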
72 static int
73 gen2_render_ring_flush(struct intel_engine_cs *ring,
74                        u32      invalidate_domains,
75                        u32      flush_domains)
76 {
77         u32 cmd;
78         int ret;
79
80         cmd = MI_FLUSH;
81         if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
82                 cmd |= MI_NO_WRITE_FLUSH;
83
84         if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
85                 cmd |= MI_READ_FLUSH;
86
87         ret = intel_ring_begin(ring, 2);
88         if (ret)
89                 return ret;
90
91         intel_ring_emit(ring, cmd);
92         intel_ring_emit(ring, MI_NOOP);
93         intel_ring_advance(ring);
94
95         return 0;
96 }
97
98 static int
99 gen4_render_ring_flush(struct intel_engine_cs *ring,
100                        u32      invalidate_domains,
101                        u32      flush_domains)
102 {
103         struct drm_device *dev = ring->dev;
104         u32 cmd;
105         int ret;
106
107         /*
108          * read/write caches:
109          *
110          * I915_GEM_DOMAIN_RENDER is always invalidated, but is
111          * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
112          * also flushed at 2d versus 3d pipeline switches.
113          *
114          * read-only caches:
115          *
116          * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
117          * MI_READ_FLUSH is set, and is always flushed on 965.
118          *
119          * I915_GEM_DOMAIN_COMMAND may not exist?
120          *
121          * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
122          * invalidated when MI_EXE_FLUSH is set.
123          *
124          * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
125          * invalidated with every MI_FLUSH.
126          *
127          * TLBs:
128          *
129          * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
130          * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
131          * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
132          * are flushed at any MI_FLUSH.
133          */
134
135         cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
136         if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
137                 cmd &= ~MI_NO_WRITE_FLUSH;
138         if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
139                 cmd |= MI_EXE_FLUSH;
140
141         if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
142             (IS_G4X(dev) || IS_GEN5(dev)))
143                 cmd |= MI_INVALIDATE_ISP;
144
145         ret = intel_ring_begin(ring, 2);
146         if (ret)
147                 return ret;
148
149         intel_ring_emit(ring, cmd);
150         intel_ring_emit(ring, MI_NOOP);
151         intel_ring_advance(ring);
152
153         return 0;
154 }
155
156 /**
157  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
158  * implementing two workarounds on gen6.  From section 1.4.7.1
159  * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
160  *
161  * [DevSNB-C+{W/A}] Before any depth stall flush (including those
162  * produced by non-pipelined state commands), software needs to first
163  * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
164  * 0.
165  *
166  * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
167  * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
168  *
169  * And the workaround for these two requires this workaround first:
170  *
171  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
172  * BEFORE the pipe-control with a post-sync op and no write-cache
173  * flushes.
174  *
175  * And this last workaround is tricky because of the requirements on
176  * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
177  * volume 2 part 1:
178  *
179  *     "1 of the following must also be set:
180  *      - Render Target Cache Flush Enable ([12] of DW1)
181  *      - Depth Cache Flush Enable ([0] of DW1)
182  *      - Stall at Pixel Scoreboard ([1] of DW1)
183  *      - Depth Stall ([13] of DW1)
184  *      - Post-Sync Operation ([13] of DW1)
185  *      - Notify Enable ([8] of DW1)"
186  *
187  * The cache flushes require the workaround flush that triggered this
188  * one, so we can't use it.  Depth stall would trigger the same.
189  * Post-sync nonzero is what triggered this second workaround, so we
190  * can't use that one either.  Notify enable is IRQs, which aren't
191  * really our business.  That leaves only stall at scoreboard.
192  */
193 static int
194 intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
195 {
196         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
197         int ret;
198
199
200         ret = intel_ring_begin(ring, 6);
201         if (ret)
202                 return ret;
203
204         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
205         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
206                         PIPE_CONTROL_STALL_AT_SCOREBOARD);
207         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
208         intel_ring_emit(ring, 0); /* low dword */
209         intel_ring_emit(ring, 0); /* high dword */
210         intel_ring_emit(ring, MI_NOOP);
211         intel_ring_advance(ring);
212
213         ret = intel_ring_begin(ring, 6);
214         if (ret)
215                 return ret;
216
217         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
218         intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
219         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
220         intel_ring_emit(ring, 0);
221         intel_ring_emit(ring, 0);
222         intel_ring_emit(ring, MI_NOOP);
223         intel_ring_advance(ring);
224
225         return 0;
226 }
227
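/*
 * gen6_render_ring_flush() emits a 4-dword PIPE_CONTROL:
 *   DW0: GFX_OP_PIPE_CONTROL(4) opcode
 *   DW1: the flush/invalidate flags assembled from the domain masks
 *   DW2: post-sync write address (scratch page, global GTT)
 *   DW3: post-sync write payload (unused, zero)
 * after first running the SNB post-sync-nonzero workaround above.
 */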
228 static int
229 gen6_render_ring_flush(struct intel_engine_cs *ring,
230                          u32 invalidate_domains, u32 flush_domains)
231 {
232         u32 flags = 0;
233         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
234         int ret;
235
236         /* Force SNB workarounds for PIPE_CONTROL flushes */
237         ret = intel_emit_post_sync_nonzero_flush(ring);
238         if (ret)
239                 return ret;
240
241         /* Just flush everything.  Experiments have shown that reducing the
242          * number of bits based on the write domains has little performance
243          * impact.
244          */
245         if (flush_domains) {
246                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
247                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
248                 /*
249                  * Ensure that any following seqno writes only happen
250                  * when the render cache is indeed flushed.
251                  */
252                 flags |= PIPE_CONTROL_CS_STALL;
253         }
254         if (invalidate_domains) {
255                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
256                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
257                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
258                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
259                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
260                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
261                 /*
262                  * TLB invalidate requires a post-sync write.
263                  */
264                 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
265         }
266
267         ret = intel_ring_begin(ring, 4);
268         if (ret)
269                 return ret;
270
271         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
272         intel_ring_emit(ring, flags);
273         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
274         intel_ring_emit(ring, 0);
275         intel_ring_advance(ring);
276
277         return 0;
278 }
279
280 static int
281 gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
282 {
283         int ret;
284
285         ret = intel_ring_begin(ring, 4);
286         if (ret)
287                 return ret;
288
289         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
290         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
291                               PIPE_CONTROL_STALL_AT_SCOREBOARD);
292         intel_ring_emit(ring, 0);
293         intel_ring_emit(ring, 0);
294         intel_ring_advance(ring);
295
296         return 0;
297 }
298
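/*
 * gen7_ring_fbc_flush() pokes MSG_FBC_REND_STATE from the ring with an
 * MI_LOAD_REGISTER_IMM and then stores the register back into the
 * scratch page (+256) with an MI_STORE_REGISTER_MEM, presumably so the
 * FBC nuke write has landed before the batch continues.
 */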
299 static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
300 {
301         int ret;
302
303         if (!ring->fbc_dirty)
304                 return 0;
305
306         ret = intel_ring_begin(ring, 6);
307         if (ret)
308                 return ret;
309         /* WaFbcNukeOn3DBlt:ivb/hsw */
310         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
311         intel_ring_emit(ring, MSG_FBC_REND_STATE);
312         intel_ring_emit(ring, value);
313         intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
314         intel_ring_emit(ring, MSG_FBC_REND_STATE);
315         intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
316         intel_ring_advance(ring);
317
318         ring->fbc_dirty = false;
319         return 0;
320 }
321
322 static int
323 gen7_render_ring_flush(struct intel_engine_cs *ring,
324                        u32 invalidate_domains, u32 flush_domains)
325 {
326         u32 flags = 0;
327         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
328         int ret;
329
330         /*
331          * Ensure that any following seqno writes only happen when the render
332          * cache is indeed flushed.
333          *
334          * Workaround: every 4th PIPE_CONTROL command (except the ones with only
335          * read-cache invalidate bits set) must have the CS_STALL bit set. We
336          * don't try to be clever and just set it unconditionally.
337          */
338         flags |= PIPE_CONTROL_CS_STALL;
339
340         /* Just flush everything.  Experiments have shown that reducing the
341          * number of bits based on the write domains has little performance
342          * impact.
343          */
344         if (flush_domains) {
345                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
346                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
347         }
348         if (invalidate_domains) {
349                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
350                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
351                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
352                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
353                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
354                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
355                 /*
356                  * TLB invalidate requires a post-sync write.
357                  */
358                 flags |= PIPE_CONTROL_QW_WRITE;
359                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
360
361                 /* Workaround: we must issue a pipe_control with CS-stall bit
362                  * set before a pipe_control command that has the state cache
363                  * invalidate bit set. */
364                 gen7_render_ring_cs_stall_wa(ring);
365         }
366
367         ret = intel_ring_begin(ring, 4);
368         if (ret)
369                 return ret;
370
371         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
372         intel_ring_emit(ring, flags);
373         intel_ring_emit(ring, scratch_addr);
374         intel_ring_emit(ring, 0);
375         intel_ring_advance(ring);
376
377         if (!invalidate_domains && flush_domains)
378                 return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
379
380         return 0;
381 }
382
383 static int
384 gen8_render_ring_flush(struct intel_engine_cs *ring,
385                        u32 invalidate_domains, u32 flush_domains)
386 {
387         u32 flags = 0;
388         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
389         int ret;
390
391         flags |= PIPE_CONTROL_CS_STALL;
392
393         if (flush_domains) {
394                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
395                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
396         }
397         if (invalidate_domains) {
398                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
399                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
400                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
401                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
402                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
403                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
404                 flags |= PIPE_CONTROL_QW_WRITE;
405                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
406         }
407
408         ret = intel_ring_begin(ring, 6);
409         if (ret)
410                 return ret;
411
412         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
413         intel_ring_emit(ring, flags);
414         intel_ring_emit(ring, scratch_addr);
415         intel_ring_emit(ring, 0);
416         intel_ring_emit(ring, 0);
417         intel_ring_emit(ring, 0);
418         intel_ring_advance(ring);
419
420         return 0;
421
422 }
423
424 static void ring_write_tail(struct intel_engine_cs *ring,
425                             u32 value)
426 {
427         struct drm_i915_private *dev_priv = ring->dev->dev_private;
428         I915_WRITE_TAIL(ring, value);
429 }
430
431 u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
432 {
433         struct drm_i915_private *dev_priv = ring->dev->dev_private;
434         u64 acthd;
435
436         if (INTEL_INFO(ring->dev)->gen >= 8)
437                 acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
438                                          RING_ACTHD_UDW(ring->mmio_base));
439         else if (INTEL_INFO(ring->dev)->gen >= 4)
440                 acthd = I915_READ(RING_ACTHD(ring->mmio_base));
441         else
442                 acthd = I915_READ(ACTHD);
443
444         return acthd;
445 }
446
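/*
 * The physical (non-GTT) status page address goes into HWS_PGA; on gen4+
 * the upper address bits are folded into the low register nibble as
 * (busaddr >> 28) & 0xf0, so e.g. a bus address of 0x123456000 would
 * contribute 0x10 (bit 32 landing in register bit 4).
 */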
447 static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
448 {
449         struct drm_i915_private *dev_priv = ring->dev->dev_private;
450         u32 addr;
451
452         addr = dev_priv->status_page_dmah->busaddr;
453         if (INTEL_INFO(ring->dev)->gen >= 4)
454                 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
455         I915_WRITE(HWS_PGA, addr);
456 }
457
458 static bool stop_ring(struct intel_engine_cs *ring)
459 {
460         struct drm_i915_private *dev_priv = to_i915(ring->dev);
461
462         if (!IS_GEN2(ring->dev)) {
463                 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
464                 if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
465                         DRM_ERROR("%s: timed out trying to stop ring\n", ring->name);
466                         return false;
467                 }
468         }
469
470         I915_WRITE_CTL(ring, 0);
471         I915_WRITE_HEAD(ring, 0);
472         ring->write_tail(ring, 0);
473
474         if (!IS_GEN2(ring->dev)) {
475                 (void)I915_READ_CTL(ring);
476                 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
477         }
478
479         return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
480 }
481
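/*
 * init_ring_common(): with forcewake held, stop the ring (retrying once,
 * since G45 often fails to reset HEAD to zero on the first attempt), set
 * up the status page, then program START and CTL and wait for the ring
 * to read back as valid with HEAD at zero.
 */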
482 static int init_ring_common(struct intel_engine_cs *ring)
483 {
484         struct drm_device *dev = ring->dev;
485         struct drm_i915_private *dev_priv = dev->dev_private;
486         struct intel_ringbuffer *ringbuf = ring->buffer;
487         struct drm_i915_gem_object *obj = ringbuf->obj;
488         int ret = 0;
489
490         gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
491
492         if (!stop_ring(ring)) {
493                 /* G45 ring initialization often fails to reset head to zero */
494                 DRM_DEBUG_KMS("%s head not reset to zero "
495                               "ctl %08x head %08x tail %08x start %08x\n",
496                               ring->name,
497                               I915_READ_CTL(ring),
498                               I915_READ_HEAD(ring),
499                               I915_READ_TAIL(ring),
500                               I915_READ_START(ring));
501
502                 if (!stop_ring(ring)) {
503                         DRM_ERROR("failed to set %s head to zero "
504                                   "ctl %08x head %08x tail %08x start %08x\n",
505                                   ring->name,
506                                   I915_READ_CTL(ring),
507                                   I915_READ_HEAD(ring),
508                                   I915_READ_TAIL(ring),
509                                   I915_READ_START(ring));
510                         ret = -EIO;
511                         goto out;
512                 }
513         }
514
515         if (I915_NEED_GFX_HWS(dev))
516                 intel_ring_setup_status_page(ring);
517         else
518                 ring_setup_phys_status_page(ring);
519
520         /* Initialize the ring. This must happen _after_ we've cleared the ring
521          * registers with the above sequence (the readback of the HEAD registers
522          * also enforces ordering), otherwise the hw might lose the new ring
523          * register values. */
524         I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
525         I915_WRITE_CTL(ring,
526                         ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
527                         | RING_VALID);
528
529         /* If the head is still not zero, the ring is dead */
530         if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
531                      I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
532                      (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
533                 DRM_ERROR("%s initialization failed "
534                           "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
535                           ring->name,
536                           I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
537                           I915_READ_HEAD(ring), I915_READ_TAIL(ring),
538                           I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
539                 ret = -EIO;
540                 goto out;
541         }
542
543         if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) {
544                 i915_kernel_lost_context(ring->dev);
545         } else {
546                 ringbuf->head = I915_READ_HEAD(ring);
547                 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
548                 ringbuf->space = ring_space(ring);
549                 ringbuf->last_retired_head = -1;
550         }
551
552         memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
553
554 out:
555         gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
556
557         return ret;
558 }
559
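/*
 * init_pipe_control(): allocate and pin a 4096-byte scratch page that
 * serves as the PIPE_CONTROL post-sync write target, and whose CPU
 * mapping doubles as the out-of-band seqno page read by
 * pc_render_get_seqno() on gen5.
 */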
560 static int
561 init_pipe_control(struct intel_engine_cs *ring)
562 {
563         int ret;
564
565         if (ring->scratch.obj)
566                 return 0;
567
568         ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
569         if (ring->scratch.obj == NULL) {
570                 DRM_ERROR("Failed to allocate seqno page\n");
571                 ret = -ENOMEM;
572                 goto err;
573         }
574
575         ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
576         if (ret)
577                 goto err_unref;
578
579         ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
580         if (ret)
581                 goto err_unref;
582
583         ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
584         ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
585         if (ring->scratch.cpu_page == NULL) {
586                 ret = -ENOMEM;
587                 goto err_unpin;
588         }
589
590         DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
591                          ring->name, ring->scratch.gtt_offset);
592         return 0;
593
594 err_unpin:
595         i915_gem_object_ggtt_unpin(ring->scratch.obj);
596 err_unref:
597         drm_gem_object_unreference(&ring->scratch.obj->base);
598 err:
599         return ret;
600 }
601
602 static int init_render_ring(struct intel_engine_cs *ring)
603 {
604         struct drm_device *dev = ring->dev;
605         struct drm_i915_private *dev_priv = dev->dev_private;
606         int ret = init_ring_common(ring);
607         if (ret)
608                 return ret;
609
610         /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
611         if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
612                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
613
614         /* We need to disable the AsyncFlip performance optimisations in order
615          * to use MI_WAIT_FOR_EVENT within the CS. It should already be
616          * programmed to '1' on all products.
617          *
618          * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
619          */
620         if (INTEL_INFO(dev)->gen >= 6)
621                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
622
623         /* Required for the hardware to program scanline values for waiting */
624         /* WaEnableFlushTlbInvalidationMode:snb */
625         if (INTEL_INFO(dev)->gen == 6)
626                 I915_WRITE(GFX_MODE,
627                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
628
629         /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
630         if (IS_GEN7(dev))
631                 I915_WRITE(GFX_MODE_GEN7,
632                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
633                            _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
634
635         if (INTEL_INFO(dev)->gen >= 5) {
636                 ret = init_pipe_control(ring);
637                 if (ret)
638                         return ret;
639         }
640
641         if (IS_GEN6(dev)) {
642                 /* From the Sandybridge PRM, volume 1 part 3, page 24:
643                  * "If this bit is set, STCunit will have LRA as replacement
644                  *  policy. [...] This bit must be reset.  LRA replacement
645                  *  policy is not supported."
646                  */
647                 I915_WRITE(CACHE_MODE_0,
648                            _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
649         }
650
651         if (INTEL_INFO(dev)->gen >= 6)
652                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
653
654         if (HAS_L3_DPF(dev))
655                 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
656
657         return ret;
658 }
659
660 static void render_ring_cleanup(struct intel_engine_cs *ring)
661 {
662         struct drm_device *dev = ring->dev;
663
664         if (ring->scratch.obj == NULL)
665                 return;
666
667         if (INTEL_INFO(dev)->gen >= 5) {
668                 kunmap(sg_page(ring->scratch.obj->pages->sgl));
669                 i915_gem_object_ggtt_unpin(ring->scratch.obj);
670         }
671
672         drm_gem_object_unreference(&ring->scratch.obj->base);
673         ring->scratch.obj = NULL;
674 }
675
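/*
 * gen6_signal() reserves num_dwords + (I915_NUM_RINGS - 1) *
 * MBOX_UPDATE_DWORDS dwords up front, then emits one fixed 4-dword slot
 * per ring below: either LRI / mbox register / seqno / NOOP, or four
 * NOOPs for rings it does not signal (including itself), so the
 * accounting stays a simple multiple of four.
 */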
676 static int gen6_signal(struct intel_engine_cs *signaller,
677                        unsigned int num_dwords)
678 {
679         struct drm_device *dev = signaller->dev;
680         struct drm_i915_private *dev_priv = dev->dev_private;
681         struct intel_engine_cs *useless;
682         int i, ret;
683
684         /* NB: In order to be able to do semaphore MBOX updates for varying
685          * number of rings, it's easiest if we round up each individual update
686          * to a multiple of 2 (since ring updates must always be a multiple of
687          * 2) even though the actual update only requires 3 dwords.
688          */
689 #define MBOX_UPDATE_DWORDS 4
690         if (i915_semaphore_is_enabled(dev))
691                 num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
692         else
693                 return intel_ring_begin(signaller, num_dwords);
694
695         ret = intel_ring_begin(signaller, num_dwords);
696         if (ret)
697                 return ret;
698 #undef MBOX_UPDATE_DWORDS
699
700         for_each_ring(useless, dev_priv, i) {
701                 u32 mbox_reg = signaller->semaphore.mbox.signal[i];
702                 if (mbox_reg != GEN6_NOSYNC) {
703                         intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
704                         intel_ring_emit(signaller, mbox_reg);
705                         intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
706                         intel_ring_emit(signaller, MI_NOOP);
707                 } else {
708                         intel_ring_emit(signaller, MI_NOOP);
709                         intel_ring_emit(signaller, MI_NOOP);
710                         intel_ring_emit(signaller, MI_NOOP);
711                         intel_ring_emit(signaller, MI_NOOP);
712                 }
713         }
714
715         return 0;
716 }
717
718 /**
719  * gen6_add_request - Update the semaphore mailbox registers
720  *
721  * @ring: ring that is adding a request
722  * @seqno: return seqno stuck into the ring
723  *
724  * Update the mailbox registers in the *other* rings with the current seqno.
725  * This acts like a signal in the canonical semaphore.
726  */
727 static int
728 gen6_add_request(struct intel_engine_cs *ring)
729 {
730         int ret;
731
732         if (ring->semaphore.signal)
733                 ret = ring->semaphore.signal(ring, 4);
734         else
735                 ret = intel_ring_begin(ring, 4);
736
737         if (ret)
738                 return ret;
739
740         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
741         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
742         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
743         intel_ring_emit(ring, MI_USER_INTERRUPT);
744         __intel_ring_advance(ring);
745
746         return 0;
747 }
748
749 static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
750                                               u32 seqno)
751 {
752         struct drm_i915_private *dev_priv = dev->dev_private;
753         return dev_priv->last_seqno < seqno;
754 }
755
756 /**
757  * gen6_ring_sync - sync the waiter to the signaller on seqno
758  *
759  * @waiter: ring that is waiting
760  * @signaller: ring which has signalled, or will signal
761  * @seqno: seqno which the waiter will block on
762  */
763 static int
764 gen6_ring_sync(struct intel_engine_cs *waiter,
765                struct intel_engine_cs *signaller,
766                u32 seqno)
767 {
768         u32 dw1 = MI_SEMAPHORE_MBOX |
769                   MI_SEMAPHORE_COMPARE |
770                   MI_SEMAPHORE_REGISTER;
771         u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
772         int ret;
773
774         /* Throughout all of the GEM code, seqno passed implies our current
775          * seqno is >= the last seqno executed. However for hardware the
776          * comparison is strictly greater than.
777          */
778         seqno -= 1;
779
780         WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
781
782         ret = intel_ring_begin(waiter, 4);
783         if (ret)
784                 return ret;
785
786         /* If seqno wrap happened, omit the wait with no-ops */
787         if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
788                 intel_ring_emit(waiter, dw1 | wait_mbox);
789                 intel_ring_emit(waiter, seqno);
790                 intel_ring_emit(waiter, 0);
791                 intel_ring_emit(waiter, MI_NOOP);
792         } else {
793                 intel_ring_emit(waiter, MI_NOOP);
794                 intel_ring_emit(waiter, MI_NOOP);
795                 intel_ring_emit(waiter, MI_NOOP);
796                 intel_ring_emit(waiter, MI_NOOP);
797         }
798         intel_ring_advance(waiter);
799
800         return 0;
801 }
802
803 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                      \
804 do {                                                                    \
805         intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
806                  PIPE_CONTROL_DEPTH_STALL);                             \
807         intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
808         intel_ring_emit(ring__, 0);                                                     \
809         intel_ring_emit(ring__, 0);                                                     \
810 } while (0)
811
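/*
 * PIPE_CONTROL_FLUSH(ring, addr) above expands to a 4-dword depth-stall
 * PIPE_CONTROL with a qword write to addr; pc_render_add_request()
 * chains six of them, each aimed at a different cacheline of the scratch
 * page, before the final PIPE_NOTIFY write of the seqno.
 */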
812 static int
813 pc_render_add_request(struct intel_engine_cs *ring)
814 {
815         u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
816         int ret;
817
818         /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
819          * incoherent with writes to memory, i.e. completely fubar,
820          * so we need to use PIPE_NOTIFY instead.
821          *
822          * However, we also need to workaround the qword write
823          * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
824          * memory before requesting an interrupt.
825          */
826         ret = intel_ring_begin(ring, 32);
827         if (ret)
828                 return ret;
829
830         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
831                         PIPE_CONTROL_WRITE_FLUSH |
832                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
833         intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
834         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
835         intel_ring_emit(ring, 0);
836         PIPE_CONTROL_FLUSH(ring, scratch_addr);
837         scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
838         PIPE_CONTROL_FLUSH(ring, scratch_addr);
839         scratch_addr += 2 * CACHELINE_BYTES;
840         PIPE_CONTROL_FLUSH(ring, scratch_addr);
841         scratch_addr += 2 * CACHELINE_BYTES;
842         PIPE_CONTROL_FLUSH(ring, scratch_addr);
843         scratch_addr += 2 * CACHELINE_BYTES;
844         PIPE_CONTROL_FLUSH(ring, scratch_addr);
845         scratch_addr += 2 * CACHELINE_BYTES;
846         PIPE_CONTROL_FLUSH(ring, scratch_addr);
847
848         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
849                         PIPE_CONTROL_WRITE_FLUSH |
850                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
851                         PIPE_CONTROL_NOTIFY);
852         intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
853         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
854         intel_ring_emit(ring, 0);
855         __intel_ring_advance(ring);
856
857         return 0;
858 }
859
860 static u32
861 gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
862 {
863         /* Workaround to force correct ordering between irq and seqno writes on
864          * ivb (and maybe also on snb) by reading from a CS register (like
865          * ACTHD) before reading the status page. */
866         if (!lazy_coherency) {
867                 struct drm_i915_private *dev_priv = ring->dev->dev_private;
868                 POSTING_READ(RING_ACTHD(ring->mmio_base));
869         }
870
871         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
872 }
873
874 static u32
875 ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
876 {
877         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
878 }
879
880 static void
881 ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
882 {
883         intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
884 }
885
886 static u32
887 pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
888 {
889         return ring->scratch.cpu_page[0];
890 }
891
892 static void
893 pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
894 {
895         ring->scratch.cpu_page[0] = seqno;
896 }
897
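/*
 * The *_ring_get_irq/*_ring_put_irq pairs below refcount user-interrupt
 * enabling per engine: the first get unmasks the engine's bit in the
 * relevant interrupt mask register under dev_priv->irq_lock, and the
 * last put masks it again.
 */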
898 static bool
899 gen5_ring_get_irq(struct intel_engine_cs *ring)
900 {
901         struct drm_device *dev = ring->dev;
902         struct drm_i915_private *dev_priv = dev->dev_private;
903         unsigned long flags;
904
905         if (!dev->irq_enabled)
906                 return false;
907
908         spin_lock_irqsave(&dev_priv->irq_lock, flags);
909         if (ring->irq_refcount++ == 0)
910                 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
911         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
912
913         return true;
914 }
915
916 static void
917 gen5_ring_put_irq(struct intel_engine_cs *ring)
918 {
919         struct drm_device *dev = ring->dev;
920         struct drm_i915_private *dev_priv = dev->dev_private;
921         unsigned long flags;
922
923         spin_lock_irqsave(&dev_priv->irq_lock, flags);
924         if (--ring->irq_refcount == 0)
925                 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
926         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
927 }
928
929 static bool
930 i9xx_ring_get_irq(struct intel_engine_cs *ring)
931 {
932         struct drm_device *dev = ring->dev;
933         struct drm_i915_private *dev_priv = dev->dev_private;
934         unsigned long flags;
935
936         if (!dev->irq_enabled)
937                 return false;
938
939         spin_lock_irqsave(&dev_priv->irq_lock, flags);
940         if (ring->irq_refcount++ == 0) {
941                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
942                 I915_WRITE(IMR, dev_priv->irq_mask);
943                 POSTING_READ(IMR);
944         }
945         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
946
947         return true;
948 }
949
950 static void
951 i9xx_ring_put_irq(struct intel_engine_cs *ring)
952 {
953         struct drm_device *dev = ring->dev;
954         struct drm_i915_private *dev_priv = dev->dev_private;
955         unsigned long flags;
956
957         spin_lock_irqsave(&dev_priv->irq_lock, flags);
958         if (--ring->irq_refcount == 0) {
959                 dev_priv->irq_mask |= ring->irq_enable_mask;
960                 I915_WRITE(IMR, dev_priv->irq_mask);
961                 POSTING_READ(IMR);
962         }
963         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
964 }
965
966 static bool
967 i8xx_ring_get_irq(struct intel_engine_cs *ring)
968 {
969         struct drm_device *dev = ring->dev;
970         struct drm_i915_private *dev_priv = dev->dev_private;
971         unsigned long flags;
972
973         if (!dev->irq_enabled)
974                 return false;
975
976         spin_lock_irqsave(&dev_priv->irq_lock, flags);
977         if (ring->irq_refcount++ == 0) {
978                 dev_priv->irq_mask &= ~ring->irq_enable_mask;
979                 I915_WRITE16(IMR, dev_priv->irq_mask);
980                 POSTING_READ16(IMR);
981         }
982         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
983
984         return true;
985 }
986
987 static void
988 i8xx_ring_put_irq(struct intel_engine_cs *ring)
989 {
990         struct drm_device *dev = ring->dev;
991         struct drm_i915_private *dev_priv = dev->dev_private;
992         unsigned long flags;
993
994         spin_lock_irqsave(&dev_priv->irq_lock, flags);
995         if (--ring->irq_refcount == 0) {
996                 dev_priv->irq_mask |= ring->irq_enable_mask;
997                 I915_WRITE16(IMR, dev_priv->irq_mask);
998                 POSTING_READ16(IMR);
999         }
1000         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1001 }
1002
1003 void intel_ring_setup_status_page(struct intel_engine_cs *ring)
1004 {
1005         struct drm_device *dev = ring->dev;
1006         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1007         u32 mmio = 0;
1008
1009         /* The ring status page addresses are no longer next to the rest of
1010          * the ring registers as of gen7.
1011          */
1012         if (IS_GEN7(dev)) {
1013                 switch (ring->id) {
1014                 case RCS:
1015                         mmio = RENDER_HWS_PGA_GEN7;
1016                         break;
1017                 case BCS:
1018                         mmio = BLT_HWS_PGA_GEN7;
1019                         break;
1020                 /*
1021                  * VCS2 doesn't actually exist on Gen7; this case is only
1022                  * here to silence gcc's switch coverage warning.
1023                  */
1024                 case VCS2:
1025                 case VCS:
1026                         mmio = BSD_HWS_PGA_GEN7;
1027                         break;
1028                 case VECS:
1029                         mmio = VEBOX_HWS_PGA_GEN7;
1030                         break;
1031                 }
1032         } else if (IS_GEN6(ring->dev)) {
1033                 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
1034         } else {
1035                 /* XXX: gen8 returns to sanity */
1036                 mmio = RING_HWS_PGA(ring->mmio_base);
1037         }
1038
1039         I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
1040         POSTING_READ(mmio);
1041
1042         /*
1043          * Flush the TLB for this page
1044          *
1045          * FIXME: These two bits have disappeared on gen8, so a question
1046          * arises: do we still need this and if so how should we go about
1047          * invalidating the TLB?
1048          */
1049         if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
1050                 u32 reg = RING_INSTPM(ring->mmio_base);
1051
1052                 /* ring should be idle before issuing a sync flush */
1053                 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
1054
1055                 I915_WRITE(reg,
1056                            _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
1057                                               INSTPM_SYNC_FLUSH));
1058                 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
1059                              1000))
1060                         DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
1061                                   ring->name);
1062         }
1063 }
1064
1065 static int
1066 bsd_ring_flush(struct intel_engine_cs *ring,
1067                u32     invalidate_domains,
1068                u32     flush_domains)
1069 {
1070         int ret;
1071
1072         ret = intel_ring_begin(ring, 2);
1073         if (ret)
1074                 return ret;
1075
1076         intel_ring_emit(ring, MI_FLUSH);
1077         intel_ring_emit(ring, MI_NOOP);
1078         intel_ring_advance(ring);
1079         return 0;
1080 }
1081
1082 static int
1083 i9xx_add_request(struct intel_engine_cs *ring)
1084 {
1085         int ret;
1086
1087         ret = intel_ring_begin(ring, 4);
1088         if (ret)
1089                 return ret;
1090
1091         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
1092         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1093         intel_ring_emit(ring, ring->outstanding_lazy_seqno);
1094         intel_ring_emit(ring, MI_USER_INTERRUPT);
1095         __intel_ring_advance(ring);
1096
1097         return 0;
1098 }
1099
1100 static bool
1101 gen6_ring_get_irq(struct intel_engine_cs *ring)
1102 {
1103         struct drm_device *dev = ring->dev;
1104         struct drm_i915_private *dev_priv = dev->dev_private;
1105         unsigned long flags;
1106
1107         if (!dev->irq_enabled)
1108                return false;
1109
1110         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1111         if (ring->irq_refcount++ == 0) {
1112                 if (HAS_L3_DPF(dev) && ring->id == RCS)
1113                         I915_WRITE_IMR(ring,
1114                                        ~(ring->irq_enable_mask |
1115                                          GT_PARITY_ERROR(dev)));
1116                 else
1117                         I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1118                 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1119         }
1120         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1121
1122         return true;
1123 }
1124
1125 static void
1126 gen6_ring_put_irq(struct intel_engine_cs *ring)
1127 {
1128         struct drm_device *dev = ring->dev;
1129         struct drm_i915_private *dev_priv = dev->dev_private;
1130         unsigned long flags;
1131
1132         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1133         if (--ring->irq_refcount == 0) {
1134                 if (HAS_L3_DPF(dev) && ring->id == RCS)
1135                         I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
1136                 else
1137                         I915_WRITE_IMR(ring, ~0);
1138                 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
1139         }
1140         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1141 }
1142
1143 static bool
1144 hsw_vebox_get_irq(struct intel_engine_cs *ring)
1145 {
1146         struct drm_device *dev = ring->dev;
1147         struct drm_i915_private *dev_priv = dev->dev_private;
1148         unsigned long flags;
1149
1150         if (!dev->irq_enabled)
1151                 return false;
1152
1153         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1154         if (ring->irq_refcount++ == 0) {
1155                 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1156                 snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
1157         }
1158         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1159
1160         return true;
1161 }
1162
1163 static void
1164 hsw_vebox_put_irq(struct intel_engine_cs *ring)
1165 {
1166         struct drm_device *dev = ring->dev;
1167         struct drm_i915_private *dev_priv = dev->dev_private;
1168         unsigned long flags;
1169
1170         if (!dev->irq_enabled)
1171                 return;
1172
1173         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1174         if (--ring->irq_refcount == 0) {
1175                 I915_WRITE_IMR(ring, ~0);
1176                 snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
1177         }
1178         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1179 }
1180
1181 static bool
1182 gen8_ring_get_irq(struct intel_engine_cs *ring)
1183 {
1184         struct drm_device *dev = ring->dev;
1185         struct drm_i915_private *dev_priv = dev->dev_private;
1186         unsigned long flags;
1187
1188         if (!dev->irq_enabled)
1189                 return false;
1190
1191         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1192         if (ring->irq_refcount++ == 0) {
1193                 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1194                         I915_WRITE_IMR(ring,
1195                                        ~(ring->irq_enable_mask |
1196                                          GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
1197                 } else {
1198                         I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1199                 }
1200                 POSTING_READ(RING_IMR(ring->mmio_base));
1201         }
1202         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1203
1204         return true;
1205 }
1206
1207 static void
1208 gen8_ring_put_irq(struct intel_engine_cs *ring)
1209 {
1210         struct drm_device *dev = ring->dev;
1211         struct drm_i915_private *dev_priv = dev->dev_private;
1212         unsigned long flags;
1213
1214         spin_lock_irqsave(&dev_priv->irq_lock, flags);
1215         if (--ring->irq_refcount == 0) {
1216                 if (HAS_L3_DPF(dev) && ring->id == RCS) {
1217                         I915_WRITE_IMR(ring,
1218                                        ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1219                 } else {
1220                         I915_WRITE_IMR(ring, ~0);
1221                 }
1222                 POSTING_READ(RING_IMR(ring->mmio_base));
1223         }
1224         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1225 }
1226
1227 static int
1228 i965_dispatch_execbuffer(struct intel_engine_cs *ring,
1229                          u64 offset, u32 length,
1230                          unsigned flags)
1231 {
1232         int ret;
1233
1234         ret = intel_ring_begin(ring, 2);
1235         if (ret)
1236                 return ret;
1237
1238         intel_ring_emit(ring,
1239                         MI_BATCH_BUFFER_START |
1240                         MI_BATCH_GTT |
1241                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
1242         intel_ring_emit(ring, offset);
1243         intel_ring_advance(ring);
1244
1245         return 0;
1246 }
1247
1248 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1249 #define I830_BATCH_LIMIT (256*1024)
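/*
 * i830 dispatch: batches flagged I915_DISPATCH_PINNED run in place;
 * anything else (up to I830_BATCH_LIMIT bytes) is first blitted into the
 * stable scratch bo so the CS never trips over the i830 TLB invalidation
 * bug, and is then executed from there.
 */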
1250 static int
1251 i830_dispatch_execbuffer(struct intel_engine_cs *ring,
1252                                 u64 offset, u32 len,
1253                                 unsigned flags)
1254 {
1255         int ret;
1256
1257         if (flags & I915_DISPATCH_PINNED) {
1258                 ret = intel_ring_begin(ring, 4);
1259                 if (ret)
1260                         return ret;
1261
1262                 intel_ring_emit(ring, MI_BATCH_BUFFER);
1263                 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1264                 intel_ring_emit(ring, offset + len - 8);
1265                 intel_ring_emit(ring, MI_NOOP);
1266                 intel_ring_advance(ring);
1267         } else {
1268                 u32 cs_offset = ring->scratch.gtt_offset;
1269
1270                 if (len > I830_BATCH_LIMIT)
1271                         return -ENOSPC;
1272
1273                 ret = intel_ring_begin(ring, 9+3);
1274                 if (ret)
1275                         return ret;
1276                 /* Blit the batch (which now has all relocs applied) to the stable batch
1277                  * scratch bo area (so that the CS never stumbles over its tlb
1278                  * invalidation bug) ... */
1279                 intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
1280                                 XY_SRC_COPY_BLT_WRITE_ALPHA |
1281                                 XY_SRC_COPY_BLT_WRITE_RGB);
1282                 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
1283                 intel_ring_emit(ring, 0);
1284                 intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
1285                 intel_ring_emit(ring, cs_offset);
1286                 intel_ring_emit(ring, 0);
1287                 intel_ring_emit(ring, 4096);
1288                 intel_ring_emit(ring, offset);
1289                 intel_ring_emit(ring, MI_FLUSH);
1290
1291                 /* ... and execute it. */
1292                 intel_ring_emit(ring, MI_BATCH_BUFFER);
1293                 intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1294                 intel_ring_emit(ring, cs_offset + len - 8);
1295                 intel_ring_advance(ring);
1296         }
1297
1298         return 0;
1299 }
1300
1301 static int
1302 i915_dispatch_execbuffer(struct intel_engine_cs *ring,
1303                          u64 offset, u32 len,
1304                          unsigned flags)
1305 {
1306         int ret;
1307
1308         ret = intel_ring_begin(ring, 2);
1309         if (ret)
1310                 return ret;
1311
1312         intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1313         intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1314         intel_ring_advance(ring);
1315
1316         return 0;
1317 }
1318
1319 static void cleanup_status_page(struct intel_engine_cs *ring)
1320 {
1321         struct drm_i915_gem_object *obj;
1322
1323         obj = ring->status_page.obj;
1324         if (obj == NULL)
1325                 return;
1326
1327         kunmap(sg_page(obj->pages->sgl));
1328         i915_gem_object_ggtt_unpin(obj);
1329         drm_gem_object_unreference(&obj->base);
1330         ring->status_page.obj = NULL;
1331 }
1332
1333 static int init_status_page(struct intel_engine_cs *ring)
1334 {
1335         struct drm_i915_gem_object *obj;
1336
1337         if ((obj = ring->status_page.obj) == NULL) {
1338                 int ret;
1339
1340                 obj = i915_gem_alloc_object(ring->dev, 4096);
1341                 if (obj == NULL) {
1342                         DRM_ERROR("Failed to allocate status page\n");
1343                         return -ENOMEM;
1344                 }
1345
1346                 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1347                 if (ret)
1348                         goto err_unref;
1349
1350                 ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
1351                 if (ret) {
1352 err_unref:
1353                         drm_gem_object_unreference(&obj->base);
1354                         return ret;
1355                 }
1356
1357                 ring->status_page.obj = obj;
1358         }
1359
1360         ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
1361         ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
1362         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1363
1364         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1365                         ring->name, ring->status_page.gfx_addr);
1366
1367         return 0;
1368 }
1369
1370 static int init_phys_status_page(struct intel_engine_cs *ring)
1371 {
1372         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1373
1374         if (!dev_priv->status_page_dmah) {
1375                 dev_priv->status_page_dmah =
1376                         drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
1377                 if (!dev_priv->status_page_dmah)
1378                         return -ENOMEM;
1379         }
1380
1381         ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1382         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1383
1384         return 0;
1385 }
1386
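/*
 * allocate_ring_buffer(): prefer stolen memory for the ring object on
 * parts without LLC, fall back to a normal GEM object, pin it into the
 * mappable GGTT and map it write-combined through the aperture.
 */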
1387 static int allocate_ring_buffer(struct intel_engine_cs *ring)
1388 {
1389         struct drm_device *dev = ring->dev;
1390         struct drm_i915_private *dev_priv = to_i915(dev);
1391         struct intel_ringbuffer *ringbuf = ring->buffer;
1392         struct drm_i915_gem_object *obj;
1393         int ret;
1394
1395         if (intel_ring_initialized(ring))
1396                 return 0;
1397
1398         obj = NULL;
1399         if (!HAS_LLC(dev))
1400                 obj = i915_gem_object_create_stolen(dev, ringbuf->size);
1401         if (obj == NULL)
1402                 obj = i915_gem_alloc_object(dev, ringbuf->size);
1403         if (obj == NULL)
1404                 return -ENOMEM;
1405
1406         /* mark ring buffers as read-only from GPU side by default */
1407         obj->gt_ro = 1;
1408
1409         ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
1410         if (ret)
1411                 goto err_unref;
1412
1413         ret = i915_gem_object_set_to_gtt_domain(obj, true);
1414         if (ret)
1415                 goto err_unpin;
1416
1417         ringbuf->virtual_start =
1418                 ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
1419                                 ringbuf->size);
1420         if (ringbuf->virtual_start == NULL) {
1421                 ret = -EINVAL;
1422                 goto err_unpin;
1423         }
1424
1425         ringbuf->obj = obj;
1426         return 0;
1427
1428 err_unpin:
1429         i915_gem_object_ggtt_unpin(obj);
1430 err_unref:
1431         drm_gem_object_unreference(&obj->base);
1432         return ret;
1433 }
1434
1435 static int intel_init_ring_buffer(struct drm_device *dev,
1436                                   struct intel_engine_cs *ring)
1437 {
1438         struct intel_ringbuffer *ringbuf = ring->buffer;
1439         int ret;
1440
1441         if (ringbuf == NULL) {
1442                 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
1443                 if (!ringbuf)
1444                         return -ENOMEM;
1445                 ring->buffer = ringbuf;
1446         }
1447
1448         ring->dev = dev;
1449         INIT_LIST_HEAD(&ring->active_list);
1450         INIT_LIST_HEAD(&ring->request_list);
1451         ringbuf->size = 32 * PAGE_SIZE;
1452         memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
1453
1454         init_waitqueue_head(&ring->irq_queue);
1455
1456         if (I915_NEED_GFX_HWS(dev)) {
1457                 ret = init_status_page(ring);
1458                 if (ret)
1459                         goto error;
1460         } else {
1461                 BUG_ON(ring->id != RCS);
1462                 ret = init_phys_status_page(ring);
1463                 if (ret)
1464                         goto error;
1465         }
1466
1467         ret = allocate_ring_buffer(ring);
1468         if (ret) {
1469                 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
1470                 goto error;
1471         }
1472
1473         /* Workaround an erratum on the i830 which causes a hang if
1474          * the TAIL pointer points to within the last 2 cachelines
1475          * of the buffer.
1476          */
1477         ringbuf->effective_size = ringbuf->size;
1478         if (IS_I830(dev) || IS_845G(dev))
1479                 ringbuf->effective_size -= 2 * CACHELINE_BYTES;
1480
1481         ret = i915_cmd_parser_init_ring(ring);
1482         if (ret)
1483                 goto error;
1484
1485         ret = ring->init(ring);
1486         if (ret)
1487                 goto error;
1488
1489         return 0;
1490
1491 error:
1492         kfree(ringbuf);
1493         ring->buffer = NULL;
1494         return ret;
1495 }
1496
1497 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
1498 {
1499         struct drm_i915_private *dev_priv = to_i915(ring->dev);
1500         struct intel_ringbuffer *ringbuf = ring->buffer;
1501
1502         if (!intel_ring_initialized(ring))
1503                 return;
1504
1505         intel_stop_ring_buffer(ring);
1506         WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
1507
1508         iounmap(ringbuf->virtual_start);
1509
1510         i915_gem_object_ggtt_unpin(ringbuf->obj);
1511         drm_gem_object_unreference(&ringbuf->obj->base);
1512         ringbuf->obj = NULL;
1513         ring->preallocated_lazy_request = NULL;
1514         ring->outstanding_lazy_seqno = 0;
1515
1516         if (ring->cleanup)
1517                 ring->cleanup(ring);
1518
1519         cleanup_status_page(ring);
1520
1521         i915_cmd_parser_fini_ring(ring);
1522
1523         kfree(ringbuf);
1524         ring->buffer = NULL;
1525 }
1526
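/*
 * Try to free at least n bytes of ring space by waiting for already
 * submitted requests to retire: reuse last_retired_head if that is enough,
 * otherwise wait on the oldest request whose completion frees the space.
 */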
1527 static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
1528 {
1529         struct intel_ringbuffer *ringbuf = ring->buffer;
1530         struct drm_i915_gem_request *request;
1531         u32 seqno = 0;
1532         int ret;
1533
1534         if (ringbuf->last_retired_head != -1) {
1535                 ringbuf->head = ringbuf->last_retired_head;
1536                 ringbuf->last_retired_head = -1;
1537
1538                 ringbuf->space = ring_space(ring);
1539                 if (ringbuf->space >= n)
1540                         return 0;
1541         }
1542
1543         list_for_each_entry(request, &ring->request_list, list) {
1544                 if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
1545                         seqno = request->seqno;
1546                         break;
1547                 }
1548         }
1549
1550         if (seqno == 0)
1551                 return -ENOSPC;
1552
1553         ret = i915_wait_seqno(ring, seqno);
1554         if (ret)
1555                 return ret;
1556
1557         i915_gem_retire_requests_ring(ring);
1558         ringbuf->head = ringbuf->last_retired_head;
1559         ringbuf->last_retired_head = -1;
1560
1561         ringbuf->space = ring_space(ring);
1562         return 0;
1563 }
1564
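/*
 * Wait for n bytes of ring space. If retiring outstanding requests is not
 * enough, poll the hardware HEAD pointer until enough space frees up, the
 * GPU is declared wedged, a pending signal interrupts an interruptible
 * wait, or the 60 second timeout expires.
 */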
1565 static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
1566 {
1567         struct drm_device *dev = ring->dev;
1568         struct drm_i915_private *dev_priv = dev->dev_private;
1569         struct intel_ringbuffer *ringbuf = ring->buffer;
1570         unsigned long end;
1571         int ret;
1572
1573         ret = intel_ring_wait_request(ring, n);
1574         if (ret != -ENOSPC)
1575                 return ret;
1576
1577         /* force the tail write in case we have been skipping them */
1578         __intel_ring_advance(ring);
1579
1580         /* With GEM the hangcheck timer should kick us out of the loop;
1581          * leaving it early runs the risk of corrupting GEM state (due
1582          * to running on almost untested codepaths). But on resume
1583          * timers don't work yet, so prevent a complete hang in that
1584          * case by choosing an insanely large timeout. */
1585         end = jiffies + 60 * HZ;
1586
1587         trace_i915_ring_wait_begin(ring);
1588         do {
1589                 ringbuf->head = I915_READ_HEAD(ring);
1590                 ringbuf->space = ring_space(ring);
1591                 if (ringbuf->space >= n) {
1592                         ret = 0;
1593                         break;
1594                 }
1595
1596                 if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
1597                     dev->primary->master) {
1598                         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1599                         if (master_priv->sarea_priv)
1600                                 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1601                 }
1602
1603                 msleep(1);
1604
1605                 if (dev_priv->mm.interruptible && signal_pending(current)) {
1606                         ret = -ERESTARTSYS;
1607                         break;
1608                 }
1609
1610                 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1611                                            dev_priv->mm.interruptible);
1612                 if (ret)
1613                         break;
1614
1615                 if (time_after(jiffies, end)) {
1616                         ret = -EBUSY;
1617                         break;
1618                 }
1619         } while (1);
1620         trace_i915_ring_wait_end(ring);
1621         return ret;
1622 }
1623
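/*
 * Pad the remainder of the ring with MI_NOOPs so that a command never
 * straddles the end of the buffer, then wrap the tail back to the start.
 */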
1624 static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
1625 {
1626         uint32_t __iomem *virt;
1627         struct intel_ringbuffer *ringbuf = ring->buffer;
1628         int rem = ringbuf->size - ringbuf->tail;
1629
1630         if (ringbuf->space < rem) {
1631                 int ret = ring_wait_for_space(ring, rem);
1632                 if (ret)
1633                         return ret;
1634         }
1635
1636         virt = ringbuf->virtual_start + ringbuf->tail;
1637         rem /= 4;
1638         while (rem--)
1639                 iowrite32(MI_NOOP, virt++);
1640
1641         ringbuf->tail = 0;
1642         ringbuf->space = ring_space(ring);
1643
1644         return 0;
1645 }
1646
1647 int intel_ring_idle(struct intel_engine_cs *ring)
1648 {
1649         u32 seqno;
1650         int ret;
1651
1652         /* We need to add any requests required to flush the objects and ring */
1653         if (ring->outstanding_lazy_seqno) {
1654                 ret = i915_add_request(ring, NULL);
1655                 if (ret)
1656                         return ret;
1657         }
1658
1659         /* Wait upon the last request to be completed */
1660         if (list_empty(&ring->request_list))
1661                 return 0;
1662
1663         seqno = list_entry(ring->request_list.prev,
1664                            struct drm_i915_gem_request,
1665                            list)->seqno;
1666
1667         return i915_wait_seqno(ring, seqno);
1668 }
1669
1670 static int
1671 intel_ring_alloc_seqno(struct intel_engine_cs *ring)
1672 {
1673         if (ring->outstanding_lazy_seqno)
1674                 return 0;
1675
1676         if (ring->preallocated_lazy_request == NULL) {
1677                 struct drm_i915_gem_request *request;
1678
1679                 request = kmalloc(sizeof(*request), GFP_KERNEL);
1680                 if (request == NULL)
1681                         return -ENOMEM;
1682
1683                 ring->preallocated_lazy_request = request;
1684         }
1685
1686         return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
1687 }
1688
1689 static int __intel_ring_prepare(struct intel_engine_cs *ring,
1690                                 int bytes)
1691 {
1692         struct intel_ringbuffer *ringbuf = ring->buffer;
1693         int ret;
1694
1695         if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
1696                 ret = intel_wrap_ring_buffer(ring);
1697                 if (unlikely(ret))
1698                         return ret;
1699         }
1700
1701         if (unlikely(ringbuf->space < bytes)) {
1702                 ret = ring_wait_for_space(ring, bytes);
1703                 if (unlikely(ret))
1704                         return ret;
1705         }
1706
1707         return 0;
1708 }
1709
1710 int intel_ring_begin(struct intel_engine_cs *ring,
1711                      int num_dwords)
1712 {
1713         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1714         int ret;
1715
1716         ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1717                                    dev_priv->mm.interruptible);
1718         if (ret)
1719                 return ret;
1720
1721         ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
1722         if (ret)
1723                 return ret;
1724
1725         /* Preallocate the outstanding lazy request (olr) before touching the ring */
1726         ret = intel_ring_alloc_seqno(ring);
1727         if (ret)
1728                 return ret;
1729
1730         ring->buffer->space -= num_dwords * sizeof(uint32_t);
1731         return 0;
1732 }
1733
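/*
 * Typical command emission built on intel_ring_begin() -- an illustrative
 * sketch of the pattern used by the emitters in this file, not a new
 * interface:
 *
 *      ret = intel_ring_begin(ring, 2);
 *      if (ret)
 *              return ret;
 *      intel_ring_emit(ring, MI_NOOP);
 *      intel_ring_emit(ring, MI_NOOP);
 *      intel_ring_advance(ring);
 *
 * intel_ring_begin() checks for a wedged GPU, reserves the requested
 * dwords (waiting for space and wrapping the buffer as needed) and
 * preallocates the lazy request before the caller starts emitting.
 */
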
1734 /* Align the ring tail to a cacheline boundary */
1735 int intel_ring_cacheline_align(struct intel_engine_cs *ring)
1736 {
1737         int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
1738         int ret;
1739
1740         if (num_dwords == 0)
1741                 return 0;
1742
1743         num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
1744         ret = intel_ring_begin(ring, num_dwords);
1745         if (ret)
1746                 return ret;
1747
1748         while (num_dwords--)
1749                 intel_ring_emit(ring, MI_NOOP);
1750
1751         intel_ring_advance(ring);
1752
1753         return 0;
1754 }
1755
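/*
 * Program a new seqno into the engine and keep the hangcheck bookkeeping
 * in sync. On gen6/7 the semaphore mailbox registers are cleared as well
 * so that no stale sync values survive the seqno change.
 */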
1756 void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
1757 {
1758         struct drm_device *dev = ring->dev;
1759         struct drm_i915_private *dev_priv = dev->dev_private;
1760
1761         BUG_ON(ring->outstanding_lazy_seqno);
1762
1763         if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
1764                 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
1765                 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
1766                 if (HAS_VEBOX(dev))
1767                         I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
1768         }
1769
1770         ring->set_seqno(ring, seqno);
1771         ring->hangcheck.seqno = seqno;
1772 }
1773
1774 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
1775                                      u32 value)
1776 {
1777         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1778
1779         /* Every tail move must follow the sequence below */
1780
1781         /* Disable notification that the ring is IDLE. The GT
1782          * will then assume that it is busy and bring it out of rc6.
1783          */
1784         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1785                    _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1786
1787         /* Clear the context id. Here be magic! */
1788         I915_WRITE64(GEN6_BSD_RNCID, 0x0);
1789
1790         /* Wait for the ring not to be idle, i.e. for it to wake up. */
1791         if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1792                       GEN6_BSD_SLEEP_INDICATOR) == 0,
1793                      50))
1794                 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1795
1796         /* Now that the ring is fully powered up, update the tail */
1797         I915_WRITE_TAIL(ring, value);
1798         POSTING_READ(RING_TAIL(ring->mmio_base));
1799
1800         /* Let the ring send IDLE messages to the GT again,
1801          * and so let it sleep to conserve power when idle.
1802          */
1803         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1804                    _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1805 }
1806
1807 static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
1808                                u32 invalidate, u32 flush)
1809 {
1810         uint32_t cmd;
1811         int ret;
1812
1813         ret = intel_ring_begin(ring, 4);
1814         if (ret)
1815                 return ret;
1816
1817         cmd = MI_FLUSH_DW;
1818         if (INTEL_INFO(ring->dev)->gen >= 8)
1819                 cmd += 1; /* extra dword on gen8 for the upper address */
1820         /*
1821          * Bspec vol 1c.5 - video engine command streamer:
1822          * "If ENABLED, all TLBs will be invalidated once the flush
1823          * operation is complete. This bit is only valid when the
1824          * Post-Sync Operation field is a value of 1h or 3h."
1825          */
1826         if (invalidate & I915_GEM_GPU_DOMAINS)
1827                 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
1828                         MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1829         intel_ring_emit(ring, cmd);
1830         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1831         if (INTEL_INFO(ring->dev)->gen >= 8) {
1832                 intel_ring_emit(ring, 0); /* upper addr */
1833                 intel_ring_emit(ring, 0); /* value */
1834         } else {
1835                 intel_ring_emit(ring, 0);
1836                 intel_ring_emit(ring, MI_NOOP);
1837         }
1838         intel_ring_advance(ring);
1839         return 0;
1840 }
1841
1842 static int
1843 gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
1844                               u64 offset, u32 len,
1845                               unsigned flags)
1846 {
1847         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1848         bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
1849                 !(flags & I915_DISPATCH_SECURE);
1850         int ret;
1851
1852         ret = intel_ring_begin(ring, 4);
1853         if (ret)
1854                 return ret;
1855
1856         /* FIXME(BDW): Address space and security selectors. */
1857         intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
1858         intel_ring_emit(ring, lower_32_bits(offset));
1859         intel_ring_emit(ring, upper_32_bits(offset));
1860         intel_ring_emit(ring, MI_NOOP);
1861         intel_ring_advance(ring);
1862
1863         return 0;
1864 }
1865
1866 static int
1867 hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
1868                               u64 offset, u32 len,
1869                               unsigned flags)
1870 {
1871         int ret;
1872
1873         ret = intel_ring_begin(ring, 2);
1874         if (ret)
1875                 return ret;
1876
1877         intel_ring_emit(ring,
1878                         MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
1879                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
1880         /* bits 0-7 carry the length on GEN6+ */
1881         intel_ring_emit(ring, offset);
1882         intel_ring_advance(ring);
1883
1884         return 0;
1885 }
1886
1887 static int
1888 gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
1889                               u64 offset, u32 len,
1890                               unsigned flags)
1891 {
1892         int ret;
1893
1894         ret = intel_ring_begin(ring, 2);
1895         if (ret)
1896                 return ret;
1897
1898         intel_ring_emit(ring,
1899                         MI_BATCH_BUFFER_START |
1900                         (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
1901         /* bits 0-7 carry the length on GEN6+ */
1902         intel_ring_emit(ring, offset);
1903         intel_ring_advance(ring);
1904
1905         return 0;
1906 }
1907
1908 /* Blitter support (SandyBridge+) */
1909
1910 static int gen6_ring_flush(struct intel_engine_cs *ring,
1911                            u32 invalidate, u32 flush)
1912 {
1913         struct drm_device *dev = ring->dev;
1914         uint32_t cmd;
1915         int ret;
1916
1917         ret = intel_ring_begin(ring, 4);
1918         if (ret)
1919                 return ret;
1920
1921         cmd = MI_FLUSH_DW;
1922         if (INTEL_INFO(ring->dev)->gen >= 8)
1923                 cmd += 1; /* extra dword on gen8 for the upper address */
1924         /*
1925          * Bspec vol 1c.3 - blitter engine command streamer:
1926          * "If ENABLED, all TLBs will be invalidated once the flush
1927          * operation is complete. This bit is only valid when the
1928          * Post-Sync Operation field is a value of 1h or 3h."
1929          */
1930         if (invalidate & I915_GEM_DOMAIN_RENDER)
1931                 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
1932                         MI_FLUSH_DW_OP_STOREDW;
1933         intel_ring_emit(ring, cmd);
1934         intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1935         if (INTEL_INFO(ring->dev)->gen >= 8) {
1936                 intel_ring_emit(ring, 0); /* upper addr */
1937                 intel_ring_emit(ring, 0); /* value */
1938         } else {
1939                 intel_ring_emit(ring, 0);
1940                 intel_ring_emit(ring, MI_NOOP);
1941         }
1942         intel_ring_advance(ring);
1943
1944         if (IS_GEN7(dev) && !invalidate && flush)
1945                 return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
1946
1947         return 0;
1948 }
1949
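/*
 * Set up the render engine: pick the per-generation vfuncs for request
 * emission, flushing, interrupts, seqno handling, semaphores and batch
 * dispatch, allocate the scratch batch used to work around the i830 CS
 * TLB erratum where needed, then hand over to intel_init_ring_buffer().
 */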
1950 int intel_init_render_ring_buffer(struct drm_device *dev)
1951 {
1952         struct drm_i915_private *dev_priv = dev->dev_private;
1953         struct intel_engine_cs *ring = &dev_priv->ring[RCS];
1954
1955         ring->name = "render ring";
1956         ring->id = RCS;
1957         ring->mmio_base = RENDER_RING_BASE;
1958
1959         if (INTEL_INFO(dev)->gen >= 8) {
1960                 ring->add_request = gen6_add_request;
1961                 ring->flush = gen8_render_ring_flush;
1962                 ring->irq_get = gen8_ring_get_irq;
1963                 ring->irq_put = gen8_ring_put_irq;
1964                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1965                 ring->get_seqno = gen6_ring_get_seqno;
1966                 ring->set_seqno = ring_set_seqno;
1967                 if (i915_semaphore_is_enabled(dev)) {
1968                         ring->semaphore.sync_to = gen6_ring_sync;
1969                         ring->semaphore.signal = gen6_signal;
1970                         ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
1971                         ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
1972                         ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
1973                         ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
1974                         ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
1975                         ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
1976                         ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
1977                         ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
1978                         ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
1979                         ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
1980                 }
1981         } else if (INTEL_INFO(dev)->gen >= 6) {
1982                 ring->add_request = gen6_add_request;
1983                 ring->flush = gen7_render_ring_flush;
1984                 if (INTEL_INFO(dev)->gen == 6)
1985                         ring->flush = gen6_render_ring_flush;
1986                 ring->irq_get = gen6_ring_get_irq;
1987                 ring->irq_put = gen6_ring_put_irq;
1988                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1989                 ring->get_seqno = gen6_ring_get_seqno;
1990                 ring->set_seqno = ring_set_seqno;
1991                 if (i915_semaphore_is_enabled(dev)) {
1992                         ring->semaphore.sync_to = gen6_ring_sync;
1993                         ring->semaphore.signal = gen6_signal;
1994                         /*
1995                          * The current semaphore scheme is only used on pre-gen8
1996                          * platforms, and there is no VCS2 ring before gen8, so the
1997                          * semaphore between RCS and VCS2 is initialized as INVALID.
1998                          * Gen8 will initialize the semaphore between VCS2 and RCS
1999                          * later.
2000                          */
2001                         ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
2002                         ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
2003                         ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
2004                         ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
2005                         ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2006                         ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
2007                         ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
2008                         ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
2009                         ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
2010                         ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2011                 }
2012         } else if (IS_GEN5(dev)) {
2013                 ring->add_request = pc_render_add_request;
2014                 ring->flush = gen4_render_ring_flush;
2015                 ring->get_seqno = pc_render_get_seqno;
2016                 ring->set_seqno = pc_render_set_seqno;
2017                 ring->irq_get = gen5_ring_get_irq;
2018                 ring->irq_put = gen5_ring_put_irq;
2019                 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
2020                                         GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
2021         } else {
2022                 ring->add_request = i9xx_add_request;
2023                 if (INTEL_INFO(dev)->gen < 4)
2024                         ring->flush = gen2_render_ring_flush;
2025                 else
2026                         ring->flush = gen4_render_ring_flush;
2027                 ring->get_seqno = ring_get_seqno;
2028                 ring->set_seqno = ring_set_seqno;
2029                 if (IS_GEN2(dev)) {
2030                         ring->irq_get = i8xx_ring_get_irq;
2031                         ring->irq_put = i8xx_ring_put_irq;
2032                 } else {
2033                         ring->irq_get = i9xx_ring_get_irq;
2034                         ring->irq_put = i9xx_ring_put_irq;
2035                 }
2036                 ring->irq_enable_mask = I915_USER_INTERRUPT;
2037         }
2038         ring->write_tail = ring_write_tail;
2039
2040         if (IS_HASWELL(dev))
2041                 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
2042         else if (IS_GEN8(dev))
2043                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2044         else if (INTEL_INFO(dev)->gen >= 6)
2045                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2046         else if (INTEL_INFO(dev)->gen >= 4)
2047                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2048         else if (IS_I830(dev) || IS_845G(dev))
2049                 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
2050         else
2051                 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
2052         ring->init = init_render_ring;
2053         ring->cleanup = render_ring_cleanup;
2054
2055         /* Workaround batchbuffer to combat the CS TLB bug. */
2056         if (HAS_BROKEN_CS_TLB(dev)) {
2057                 struct drm_i915_gem_object *obj;
2058                 int ret;
2059
2060                 obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
2061                 if (obj == NULL) {
2062                         DRM_ERROR("Failed to allocate batch bo\n");
2063                         return -ENOMEM;
2064                 }
2065
2066                 ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
2067                 if (ret != 0) {
2068                         drm_gem_object_unreference(&obj->base);
2069                         DRM_ERROR("Failed to pin batch bo\n");
2070                         return ret;
2071                 }
2072
2073                 ring->scratch.obj = obj;
2074                 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
2075         }
2076
2077         return intel_init_ring_buffer(dev, ring);
2078 }
2079
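/*
 * Legacy (non-KMS/DRI) render ring setup: the ring lives in a caller
 * provided physical range that is ioremapped directly instead of being
 * backed by a GEM object, and only pre-gen6 hardware is supported.
 */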
2080 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
2081 {
2082         struct drm_i915_private *dev_priv = dev->dev_private;
2083         struct intel_engine_cs *ring = &dev_priv->ring[RCS];
2084         struct intel_ringbuffer *ringbuf = ring->buffer;
2085         int ret;
2086
2087         if (ringbuf == NULL) {
2088                 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
2089                 if (!ringbuf)
2090                         return -ENOMEM;
2091                 ring->buffer = ringbuf;
2092         }
2093
2094         ring->name = "render ring";
2095         ring->id = RCS;
2096         ring->mmio_base = RENDER_RING_BASE;
2097
2098         if (INTEL_INFO(dev)->gen >= 6) {
2099                 /* non-kms not supported on gen6+ */
2100                 ret = -ENODEV;
2101                 goto err_ringbuf;
2102         }
2103
2104         /* Note: gem is not supported on gen5/ilk without kms (the corresponding
2105          * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
2106          * the special gen5 functions. */
2107         ring->add_request = i9xx_add_request;
2108         if (INTEL_INFO(dev)->gen < 4)
2109                 ring->flush = gen2_render_ring_flush;
2110         else
2111                 ring->flush = gen4_render_ring_flush;
2112         ring->get_seqno = ring_get_seqno;
2113         ring->set_seqno = ring_set_seqno;
2114         if (IS_GEN2(dev)) {
2115                 ring->irq_get = i8xx_ring_get_irq;
2116                 ring->irq_put = i8xx_ring_put_irq;
2117         } else {
2118                 ring->irq_get = i9xx_ring_get_irq;
2119                 ring->irq_put = i9xx_ring_put_irq;
2120         }
2121         ring->irq_enable_mask = I915_USER_INTERRUPT;
2122         ring->write_tail = ring_write_tail;
2123         if (INTEL_INFO(dev)->gen >= 4)
2124                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2125         else if (IS_I830(dev) || IS_845G(dev))
2126                 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
2127         else
2128                 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
2129         ring->init = init_render_ring;
2130         ring->cleanup = render_ring_cleanup;
2131
2132         ring->dev = dev;
2133         INIT_LIST_HEAD(&ring->active_list);
2134         INIT_LIST_HEAD(&ring->request_list);
2135
2136         ringbuf->size = size;
2137         ringbuf->effective_size = ringbuf->size;
2138         if (IS_I830(ring->dev) || IS_845G(ring->dev))
2139                 ringbuf->effective_size -= 2 * CACHELINE_BYTES;
2140
2141         ringbuf->virtual_start = ioremap_wc(start, size);
2142         if (ringbuf->virtual_start == NULL) {
2143                 DRM_ERROR("cannot ioremap virtual address for"
2144                           " ring buffer\n");
2145                 ret = -ENOMEM;
2146                 goto err_ringbuf;
2147         }
2148
2149         if (!I915_NEED_GFX_HWS(dev)) {
2150                 ret = init_phys_status_page(ring);
2151                 if (ret)
2152                         goto err_vstart;
2153         }
2154
2155         return 0;
2156
2157 err_vstart:
2158         iounmap(ringbuf->virtual_start);
2159 err_ringbuf:
2160         kfree(ringbuf);
2161         ring->buffer = NULL;
2162         return ret;
2163 }
2164
2165 int intel_init_bsd_ring_buffer(struct drm_device *dev)
2166 {
2167         struct drm_i915_private *dev_priv = dev->dev_private;
2168         struct intel_engine_cs *ring = &dev_priv->ring[VCS];
2169
2170         ring->name = "bsd ring";
2171         ring->id = VCS;
2172
2173         ring->write_tail = ring_write_tail;
2174         if (INTEL_INFO(dev)->gen >= 6) {
2175                 ring->mmio_base = GEN6_BSD_RING_BASE;
2176                 /* gen6 bsd needs a special workaround for tail updates */
2177                 if (IS_GEN6(dev))
2178                         ring->write_tail = gen6_bsd_ring_write_tail;
2179                 ring->flush = gen6_bsd_ring_flush;
2180                 ring->add_request = gen6_add_request;
2181                 ring->get_seqno = gen6_ring_get_seqno;
2182                 ring->set_seqno = ring_set_seqno;
2183                 if (INTEL_INFO(dev)->gen >= 8) {
2184                         ring->irq_enable_mask =
2185                                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
2186                         ring->irq_get = gen8_ring_get_irq;
2187                         ring->irq_put = gen8_ring_put_irq;
2188                         ring->dispatch_execbuffer =
2189                                 gen8_ring_dispatch_execbuffer;
2190                         if (i915_semaphore_is_enabled(dev)) {
2191                                 ring->semaphore.sync_to = gen6_ring_sync;
2192                                 ring->semaphore.signal = gen6_signal;
2193                                 /*
2194                                  * The current semaphore scheme is only used on
2195                                  * pre-gen8 platforms, and there is no VCS2 ring
2196                                  * before gen8, so the semaphore between VCS and
2197                                  * VCS2 is initialized as INVALID.  Gen8 will
2198                                  * initialize the semaphore between VCS2 and VCS
2199                                  * later.
2200                                  */
2201                                 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
2202                                 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2203                                 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2204                                 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2205                                 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2206                                 ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
2207                                 ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
2208                                 ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
2209                                 ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
2210                                 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2211                         }
2212                 } else {
2213                         ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2214                         ring->irq_get = gen6_ring_get_irq;
2215                         ring->irq_put = gen6_ring_put_irq;
2216                         ring->dispatch_execbuffer =
2217                                 gen6_ring_dispatch_execbuffer;
2218                         if (i915_semaphore_is_enabled(dev)) {
2219                                 ring->semaphore.sync_to = gen6_ring_sync;
2220                                 ring->semaphore.signal = gen6_signal;
2221                                 ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
2222                                 ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2223                                 ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
2224                                 ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
2225                                 ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2226                                 ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
2227                                 ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
2228                                 ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
2229                                 ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
2230                                 ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2231                         }
2232                 }
2233         } else {
2234                 ring->mmio_base = BSD_RING_BASE;
2235                 ring->flush = bsd_ring_flush;
2236                 ring->add_request = i9xx_add_request;
2237                 ring->get_seqno = ring_get_seqno;
2238                 ring->set_seqno = ring_set_seqno;
2239                 if (IS_GEN5(dev)) {
2240                         ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2241                         ring->irq_get = gen5_ring_get_irq;
2242                         ring->irq_put = gen5_ring_put_irq;
2243                 } else {
2244                         ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2245                         ring->irq_get = i9xx_ring_get_irq;
2246                         ring->irq_put = i9xx_ring_put_irq;
2247                 }
2248                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2249         }
2250         ring->init = init_ring_common;
2251
2252         return intel_init_ring_buffer(dev, ring);
2253 }
2254
2255 /**
2256  * Initialize the second BSD ring for Broadwell GT3.
2257  * Note that this ring only exists on Broadwell GT3.
2258  */
2259 int intel_init_bsd2_ring_buffer(struct drm_device *dev)
2260 {
2261         struct drm_i915_private *dev_priv = dev->dev_private;
2262         struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
2263
2264         if (INTEL_INFO(dev)->gen != 8) {
2265                 DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
2266                 return -EINVAL;
2267         }
2268
2269         ring->name = "bsd2 ring";
2270         ring->id = VCS2;
2271
2272         ring->write_tail = ring_write_tail;
2273         ring->mmio_base = GEN8_BSD2_RING_BASE;
2274         ring->flush = gen6_bsd_ring_flush;
2275         ring->add_request = gen6_add_request;
2276         ring->get_seqno = gen6_ring_get_seqno;
2277         ring->set_seqno = ring_set_seqno;
2278         ring->irq_enable_mask =
2279                         GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
2280         ring->irq_get = gen8_ring_get_irq;
2281         ring->irq_put = gen8_ring_put_irq;
2282         ring->dispatch_execbuffer =
2283                         gen8_ring_dispatch_execbuffer;
2284         ring->semaphore.sync_to = gen6_ring_sync;
2285         ring->semaphore.signal = gen6_signal;
2286         /*
2287          * The current semaphore scheme is only used on pre-gen8 platforms,
2288          * and there is no second BSD ring before gen8, so the semaphore
2289          * registers between VCS2 and the other rings are initialized as
2290          * invalid.  Gen8 will initialize these semaphores later.
2291          */
2292         ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
2293         ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2294         ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2295         ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2296         ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2297         ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
2298         ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
2299         ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
2300         ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
2301         ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2302
2303         ring->init = init_ring_common;
2304
2305         return intel_init_ring_buffer(dev, ring);
2306 }
2307
2308 int intel_init_blt_ring_buffer(struct drm_device *dev)
2309 {
2310         struct drm_i915_private *dev_priv = dev->dev_private;
2311         struct intel_engine_cs *ring = &dev_priv->ring[BCS];
2312
2313         ring->name = "blitter ring";
2314         ring->id = BCS;
2315
2316         ring->mmio_base = BLT_RING_BASE;
2317         ring->write_tail = ring_write_tail;
2318         ring->flush = gen6_ring_flush;
2319         ring->add_request = gen6_add_request;
2320         ring->get_seqno = gen6_ring_get_seqno;
2321         ring->set_seqno = ring_set_seqno;
2322         if (INTEL_INFO(dev)->gen >= 8) {
2323                 ring->irq_enable_mask =
2324                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
2325                 ring->irq_get = gen8_ring_get_irq;
2326                 ring->irq_put = gen8_ring_put_irq;
2327                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2328                 if (i915_semaphore_is_enabled(dev)) {
2329                         ring->semaphore.sync_to = gen6_ring_sync;
2330                         ring->semaphore.signal = gen6_signal;
2331                         ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
2332                         ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2333                         ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2334                         ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2335                         ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2336                         ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
2337                         ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
2338                         ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
2339                         ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
2340                         ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2341                 }
2342         } else {
2343                 ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2344                 ring->irq_get = gen6_ring_get_irq;
2345                 ring->irq_put = gen6_ring_put_irq;
2346                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2347                 if (i915_semaphore_is_enabled(dev)) {
2348                         ring->semaphore.signal = gen6_signal;
2349                         ring->semaphore.sync_to = gen6_ring_sync;
2350                         /*
2351                          * The current semaphore scheme is only used on pre-gen8
2352                          * platforms, and there is no VCS2 ring before gen8, so the
2353                          * semaphore between BCS and VCS2 is initialized as INVALID.
2354                          * Gen8 will initialize the semaphore between BCS and VCS2
2355                          * later.
2356                          */
2357                         ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
2358                         ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
2359                         ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2360                         ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
2361                         ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2362                         ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
2363                         ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
2364                         ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
2365                         ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
2366                         ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2367                 }
2368         }
2369         ring->init = init_ring_common;
2370
2371         return intel_init_ring_buffer(dev, ring);
2372 }
2373
2374 int intel_init_vebox_ring_buffer(struct drm_device *dev)
2375 {
2376         struct drm_i915_private *dev_priv = dev->dev_private;
2377         struct intel_engine_cs *ring = &dev_priv->ring[VECS];
2378
2379         ring->name = "video enhancement ring";
2380         ring->id = VECS;
2381
2382         ring->mmio_base = VEBOX_RING_BASE;
2383         ring->write_tail = ring_write_tail;
2384         ring->flush = gen6_ring_flush;
2385         ring->add_request = gen6_add_request;
2386         ring->get_seqno = gen6_ring_get_seqno;
2387         ring->set_seqno = ring_set_seqno;
2388
2389         if (INTEL_INFO(dev)->gen >= 8) {
2390                 ring->irq_enable_mask =
2391                         GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
2392                 ring->irq_get = gen8_ring_get_irq;
2393                 ring->irq_put = gen8_ring_put_irq;
2394                 ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2395                 if (i915_semaphore_is_enabled(dev)) {
2396                         ring->semaphore.sync_to = gen6_ring_sync;
2397                         ring->semaphore.signal = gen6_signal;
2398                         ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
2399                         ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2400                         ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2401                         ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2402                         ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2403                         ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
2404                         ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
2405                         ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
2406                         ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
2407                         ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2408                 }
2409         } else {
2410                 ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2411                 ring->irq_get = hsw_vebox_get_irq;
2412                 ring->irq_put = hsw_vebox_put_irq;
2413                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2414                 if (i915_semaphore_is_enabled(dev)) {
2415                         ring->semaphore.sync_to = gen6_ring_sync;
2416                         ring->semaphore.signal = gen6_signal;
2417                         ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
2418                         ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
2419                         ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
2420                         ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2421                         ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2422                         ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
2423                         ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
2424                         ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
2425                         ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
2426                         ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2427                 }
2428         }
2429         ring->init = init_ring_common;
2430
2431         return intel_init_ring_buffer(dev, ring);
2432 }
2433
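/*
 * Emit a flush of the GPU caches for this engine, but only if a previous
 * submission actually left them dirty.
 */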
2434 int
2435 intel_ring_flush_all_caches(struct intel_engine_cs *ring)
2436 {
2437         int ret;
2438
2439         if (!ring->gpu_caches_dirty)
2440                 return 0;
2441
2442         ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
2443         if (ret)
2444                 return ret;
2445
2446         trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
2447
2448         ring->gpu_caches_dirty = false;
2449         return 0;
2450 }
2451
2452 int
2453 intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
2454 {
2455         uint32_t flush_domains;
2456         int ret;
2457
2458         flush_domains = 0;
2459         if (ring->gpu_caches_dirty)
2460                 flush_domains = I915_GEM_GPU_DOMAINS;
2461
2462         ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
2463         if (ret)
2464                 return ret;
2465
2466         trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
2467
2468         ring->gpu_caches_dirty = false;
2469         return 0;
2470 }
2471
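/*
 * Idle the engine before stopping it; a failure to quiesce is only
 * reported when no GPU reset is already in progress.
 */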
2472 void
2473 intel_stop_ring_buffer(struct intel_engine_cs *ring)
2474 {
2475         int ret;
2476
2477         if (!intel_ring_initialized(ring))
2478                 return;
2479
2480         ret = intel_ring_idle(ring);
2481         if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
2482                 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
2483                           ring->name, ret);
2484
2485         stop_ring(ring);
2486 }