1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Hai hao <haihao.xiang@intel.com>
27  *
28  */
29
30 #include "drmP.h"
31 #include "drm.h"
32 #include "i915_drv.h"
33 #include "i915_drm.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
37 /*
38  * 965+ support PIPE_CONTROL commands, which provide finer grained control
39  * over cache flushing.
40  */
41 struct pipe_control {
42         struct drm_i915_gem_object *obj;
43         volatile u32 *cpu_page;
44         u32 gtt_offset;
45 };
46
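/*
 * A short worked example of the space computation below, assuming a
 * 4096-byte ring: with head = 256 and tail = 3840,
 * 256 - (3840 + 8) = -3592, which wraps to -3592 + 4096 = 504 free bytes.
 * The 8 bytes of slack keep the tail from ever advancing flush with the
 * head, so a full ring is not confused with an empty one (head == tail).
 */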
47 static inline int ring_space(struct intel_ring_buffer *ring)
48 {
49         int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
50         if (space < 0)
51                 space += ring->size;
52         return space;
53 }
54
55 static int
56 render_ring_flush(struct intel_ring_buffer *ring,
57                   u32   invalidate_domains,
58                   u32   flush_domains)
59 {
60         struct drm_device *dev = ring->dev;
61         u32 cmd;
62         int ret;
63
64         /*
65          * read/write caches:
66          *
67          * I915_GEM_DOMAIN_RENDER is always invalidated, but is
68          * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
69          * also flushed at 2d versus 3d pipeline switches.
70          *
71          * read-only caches:
72          *
73          * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
74          * MI_READ_FLUSH is set, and is always flushed on 965.
75          *
76          * I915_GEM_DOMAIN_COMMAND may not exist?
77          *
78          * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
79          * invalidated when MI_EXE_FLUSH is set.
80          *
81          * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
82          * invalidated with every MI_FLUSH.
83          *
84          * TLBs:
85          *
86          * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
87          * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
88          * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
89          * are flushed at any MI_FLUSH.
90          */
91
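        /*
         * A concrete (non-exhaustive) trace of the mapping below: with
         * invalidate_domains = I915_GEM_DOMAIN_INSTRUCTION and
         * flush_domains = I915_GEM_DOMAIN_RENDER, the result is
         * cmd = MI_FLUSH | MI_EXE_FLUSH, with MI_NO_WRITE_FLUSH cleared
         * because the render domain appears in the combined mask.
         */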
92         cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
93         if ((invalidate_domains|flush_domains) &
94             I915_GEM_DOMAIN_RENDER)
95                 cmd &= ~MI_NO_WRITE_FLUSH;
96         if (INTEL_INFO(dev)->gen < 4) {
97                 /*
98                  * On the 965, the sampler cache always gets flushed
99                  * and this bit is reserved.
100                  */
101                 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
102                         cmd |= MI_READ_FLUSH;
103         }
104         if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
105                 cmd |= MI_EXE_FLUSH;
106
107         if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
108             (IS_G4X(dev) || IS_GEN5(dev)))
109                 cmd |= MI_INVALIDATE_ISP;
110
111         ret = intel_ring_begin(ring, 2);
112         if (ret)
113                 return ret;
114
115         intel_ring_emit(ring, cmd);
116         intel_ring_emit(ring, MI_NOOP);
117         intel_ring_advance(ring);
118
119         return 0;
120 }
121
122 /**
123  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
124  * implementing two workarounds on gen6.  From section 1.4.7.1
125  * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
126  *
127  * [DevSNB-C+{W/A}] Before any depth stall flush (including those
128  * produced by non-pipelined state commands), software needs to first
129  * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
130  * 0.
131  *
132  * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
133  * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
134  *
135  * And the workaround for these two requires this workaround first:
136  *
137  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
138  * BEFORE the pipe-control with a post-sync op and no write-cache
139  * flushes.
140  *
141  * And this last workaround is tricky because of the requirements on
142  * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
143  * volume 2 part 1:
144  *
145  *     "1 of the following must also be set:
146  *      - Render Target Cache Flush Enable ([12] of DW1)
147  *      - Depth Cache Flush Enable ([0] of DW1)
148  *      - Stall at Pixel Scoreboard ([1] of DW1)
149  *      - Depth Stall ([13] of DW1)
150  *      - Post-Sync Operation ([13] of DW1)
151  *      - Notify Enable ([8] of DW1)"
152  *
153  * The cache flushes require the workaround flush that triggered this
154  * one, so we can't use it.  Depth stall would trigger the same.
155  * Post-sync nonzero is what triggered this second workaround, so we
156  * can't use that one either.  Notify enable is IRQs, which aren't
157  * really our business.  That leaves only stall at scoreboard.
158  */
159 static int
160 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
161 {
162         struct pipe_control *pc = ring->private;
163         u32 scratch_addr = pc->gtt_offset + 128;
164         int ret;
165
167         ret = intel_ring_begin(ring, 6);
168         if (ret)
169                 return ret;
170
171         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
172         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
173                         PIPE_CONTROL_STALL_AT_SCOREBOARD);
174         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
175         intel_ring_emit(ring, 0); /* low dword */
176         intel_ring_emit(ring, 0); /* high dword */
177         intel_ring_emit(ring, MI_NOOP);
178         intel_ring_advance(ring);
179
180         ret = intel_ring_begin(ring, 6);
181         if (ret)
182                 return ret;
183
184         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
185         intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
186         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
187         intel_ring_emit(ring, 0);
188         intel_ring_emit(ring, 0);
189         intel_ring_emit(ring, MI_NOOP);
190         intel_ring_advance(ring);
191
192         return 0;
193 }
194
195 static int
196 gen6_render_ring_flush(struct intel_ring_buffer *ring,
197                          u32 invalidate_domains, u32 flush_domains)
198 {
199         u32 flags = 0;
200         struct pipe_control *pc = ring->private;
201         u32 scratch_addr = pc->gtt_offset + 128;
202         int ret;
203
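        /*
         * Note: scratch_addr points 128 bytes into the pipe_control scratch
         * page, the same dummy write target used by
         * intel_emit_post_sync_nonzero_flush() above; pc_render_add_request()
         * likewise spaces its scratch writes 128 bytes apart to keep them in
         * separate cachelines.
         */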
204         /* Force SNB workarounds for PIPE_CONTROL flushes */
205         ret = intel_emit_post_sync_nonzero_flush(ring);
        if (ret)
                return ret;
206
207         /* Just flush everything.  Experiments have shown that reducing the
208          * number of bits based on the write domains has little performance
209          * impact.
210          */
211         flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
212         flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
213         flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
214         flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
215         flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
216         flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
217         flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
218
219         ret = intel_ring_begin(ring, 6);
220         if (ret)
221                 return ret;
222
223         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
224         intel_ring_emit(ring, flags);
225         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
226         intel_ring_emit(ring, 0); /* lower dword */
227         intel_ring_emit(ring, 0); /* upper dword */
228         intel_ring_emit(ring, MI_NOOP);
229         intel_ring_advance(ring);
230
231         return 0;
232 }
233
234 static void ring_write_tail(struct intel_ring_buffer *ring,
235                             u32 value)
236 {
237         drm_i915_private_t *dev_priv = ring->dev->dev_private;
238         I915_WRITE_TAIL(ring, value);
239 }
240
241 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
242 {
243         drm_i915_private_t *dev_priv = ring->dev->dev_private;
244         u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
245                         RING_ACTHD(ring->mmio_base) : ACTHD;
246
247         return I915_READ(acthd_reg);
248 }
249
250 static int init_ring_common(struct intel_ring_buffer *ring)
251 {
252         struct drm_device *dev = ring->dev;
253         drm_i915_private_t *dev_priv = dev->dev_private;
254         struct drm_i915_gem_object *obj = ring->obj;
255         int ret = 0;
256         u32 head;
257
258         if (HAS_FORCE_WAKE(dev))
259                 gen6_gt_force_wake_get(dev_priv);
260
261         /* Stop the ring if it's running. */
262         I915_WRITE_CTL(ring, 0);
263         I915_WRITE_HEAD(ring, 0);
264         ring->write_tail(ring, 0);
265
266         head = I915_READ_HEAD(ring) & HEAD_ADDR;
267
268         /* G45 ring initialization fails to reset head to zero */
269         if (head != 0) {
270                 DRM_DEBUG_KMS("%s head not reset to zero "
271                               "ctl %08x head %08x tail %08x start %08x\n",
272                               ring->name,
273                               I915_READ_CTL(ring),
274                               I915_READ_HEAD(ring),
275                               I915_READ_TAIL(ring),
276                               I915_READ_START(ring));
277
278                 I915_WRITE_HEAD(ring, 0);
279
280                 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
281                         DRM_ERROR("failed to set %s head to zero "
282                                   "ctl %08x head %08x tail %08x start %08x\n",
283                                   ring->name,
284                                   I915_READ_CTL(ring),
285                                   I915_READ_HEAD(ring),
286                                   I915_READ_TAIL(ring),
287                                   I915_READ_START(ring));
288                 }
289         }
290
291         /* Initialize the ring. This must happen _after_ we've cleared the ring
292          * registers with the above sequence (the readback of the HEAD registers
293          * also enforces ordering), otherwise the hw might lose the new ring
294          * register values. */
295         I915_WRITE_START(ring, obj->gtt_offset);
296         I915_WRITE_CTL(ring,
297                         ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
298                         | RING_VALID);
299
300         /* If the head is still not zero, the ring is dead */
301         if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
302                      I915_READ_START(ring) == obj->gtt_offset &&
303                      (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
304                 DRM_ERROR("%s initialization failed "
305                                 "ctl %08x head %08x tail %08x start %08x\n",
306                                 ring->name,
307                                 I915_READ_CTL(ring),
308                                 I915_READ_HEAD(ring),
309                                 I915_READ_TAIL(ring),
310                                 I915_READ_START(ring));
311                 ret = -EIO;
312                 goto out;
313         }
314
315         if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
316                 i915_kernel_lost_context(ring->dev);
317         else {
318                 ring->head = I915_READ_HEAD(ring);
319                 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
320                 ring->space = ring_space(ring);
321         }
322
323 out:
324         if (HAS_FORCE_WAKE(dev))
325                 gen6_gt_force_wake_put(dev_priv);
326
327         return ret;
328 }
329
330 static int
331 init_pipe_control(struct intel_ring_buffer *ring)
332 {
333         struct pipe_control *pc;
334         struct drm_i915_gem_object *obj;
335         int ret;
336
337         if (ring->private)
338                 return 0;
339
340         pc = kmalloc(sizeof(*pc), GFP_KERNEL);
341         if (!pc)
342                 return -ENOMEM;
343
344         obj = i915_gem_alloc_object(ring->dev, 4096);
345         if (obj == NULL) {
346                 DRM_ERROR("Failed to allocate seqno page\n");
347                 ret = -ENOMEM;
348                 goto err;
349         }
350
351         i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
352
353         ret = i915_gem_object_pin(obj, 4096, true);
354         if (ret)
355                 goto err_unref;
356
357         pc->gtt_offset = obj->gtt_offset;
358         pc->cpu_page = kmap(obj->pages[0]);
359         if (pc->cpu_page == NULL) {
                ret = -ENOMEM;
360                 goto err_unpin;
        }
361
362         pc->obj = obj;
363         ring->private = pc;
364         return 0;
365
366 err_unpin:
367         i915_gem_object_unpin(obj);
368 err_unref:
369         drm_gem_object_unreference(&obj->base);
370 err:
371         kfree(pc);
372         return ret;
373 }
374
375 static void
376 cleanup_pipe_control(struct intel_ring_buffer *ring)
377 {
378         struct pipe_control *pc = ring->private;
379         struct drm_i915_gem_object *obj;
380
381         if (!ring->private)
382                 return;
383
384         obj = pc->obj;
385         kunmap(obj->pages[0]);
386         i915_gem_object_unpin(obj);
387         drm_gem_object_unreference(&obj->base);
388
389         kfree(pc);
390         ring->private = NULL;
391 }
392
393 static int init_render_ring(struct intel_ring_buffer *ring)
394 {
395         struct drm_device *dev = ring->dev;
396         struct drm_i915_private *dev_priv = dev->dev_private;
397         int ret = init_ring_common(ring);
398
399         if (INTEL_INFO(dev)->gen > 3) {
400                 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
401                 I915_WRITE(MI_MODE, mode);
402         }
403
404         /* We need to disable the AsyncFlip performance optimisations in order
405          * to use MI_WAIT_FOR_EVENT within the CS. It should already be
406          * programmed to '1' on all products.
407          */
408         if (INTEL_INFO(dev)->gen >= 6)
409                 I915_WRITE(MI_MODE, GFX_MODE_ENABLE(ASYNC_FLIP_PERF_DISABLE));
410
411         if (IS_GEN7(dev))
412                 I915_WRITE(GFX_MODE_GEN7,
413                            GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
414                            GFX_MODE_ENABLE(GFX_REPLAY_MODE));
415
416         if (INTEL_INFO(dev)->gen >= 5) {
417                 ret = init_pipe_control(ring);
418                 if (ret)
419                         return ret;
420         }
421
423         if (IS_GEN6(dev)) {
424                 /* From the Sandybridge PRM, volume 1 part 3, page 24:
425                  * "If this bit is set, STCunit will have LRA as replacement
426                  *  policy. [...] This bit must be reset.  LRA replacement
427                  *  policy is not supported."
428                  */
429                 I915_WRITE(CACHE_MODE_0,
430                            CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
431         }
432
433         if (INTEL_INFO(dev)->gen >= 6) {
434                 I915_WRITE(INSTPM,
435                            INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
436         }
437
438         return ret;
439 }
440
441 static void render_ring_cleanup(struct intel_ring_buffer *ring)
442 {
443         if (!ring->private)
444                 return;
445
446         cleanup_pipe_control(ring);
447 }
448
449 static void
450 update_mboxes(struct intel_ring_buffer *ring,
451             u32 seqno,
452             u32 mmio_offset)
453 {
454         intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
455                               MI_SEMAPHORE_GLOBAL_GTT |
456                               MI_SEMAPHORE_REGISTER |
457                               MI_SEMAPHORE_UPDATE);
458         intel_ring_emit(ring, seqno);
459         intel_ring_emit(ring, mmio_offset);
460 }
461
462 /**
463  * gen6_add_request - Update the semaphore mailbox registers
464  *
465  * @ring: ring that is adding a request
466  * @seqno: return seqno stuck into the ring
467  *
468  * Update the mailbox registers in the *other* rings with the current seqno.
469  * This acts like a signal in the canonical semaphore.
470  */
471 static int
472 gen6_add_request(struct intel_ring_buffer *ring,
473                  u32 *seqno)
474 {
475         u32 mbox1_reg;
476         u32 mbox2_reg;
477         int ret;
478
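        /*
         * 10 dwords: two mailbox updates of 3 dwords each (see
         * update_mboxes() above) plus the 4-dword store/interrupt
         * sequence emitted below.
         */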
479         ret = intel_ring_begin(ring, 10);
480         if (ret)
481                 return ret;
482
483         mbox1_reg = ring->signal_mbox[0];
484         mbox2_reg = ring->signal_mbox[1];
485
486         *seqno = i915_gem_next_request_seqno(ring);
487
488         update_mboxes(ring, *seqno, mbox1_reg);
489         update_mboxes(ring, *seqno, mbox2_reg);
490         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
491         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
492         intel_ring_emit(ring, *seqno);
493         intel_ring_emit(ring, MI_USER_INTERRUPT);
494         intel_ring_advance(ring);
495
496         return 0;
497 }
498
499 /**
500  * intel_ring_sync - sync the waiter to the signaller on seqno
501  *
502  * @waiter: ring that is waiting
503  * @signaller: ring which has, or will signal
504  * @seqno: seqno which the waiter will block on
505  */
506 static int
507 intel_ring_sync(struct intel_ring_buffer *waiter,
508                 struct intel_ring_buffer *signaller,
509                 int ring,
510                 u32 seqno)
511 {
512         int ret;
513         u32 dw1 = MI_SEMAPHORE_MBOX |
514                   MI_SEMAPHORE_COMPARE |
515                   MI_SEMAPHORE_REGISTER;
516
517         ret = intel_ring_begin(waiter, 4);
518         if (ret)
519                 return ret;
520
521         intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
522         intel_ring_emit(waiter, seqno);
523         intel_ring_emit(waiter, 0);
524         intel_ring_emit(waiter, MI_NOOP);
525         intel_ring_advance(waiter);
526
527         return 0;
528 }
529
530 /* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
531 int
532 render_ring_sync_to(struct intel_ring_buffer *waiter,
533                     struct intel_ring_buffer *signaller,
534                     u32 seqno)
535 {
536         WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
537         return intel_ring_sync(waiter,
538                                signaller,
539                                RCS,
540                                seqno);
541 }
542
543 /* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
544 int
545 gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
546                       struct intel_ring_buffer *signaller,
547                       u32 seqno)
548 {
549         WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
550         return intel_ring_sync(waiter,
551                                signaller,
552                                VCS,
553                                seqno);
554 }
555
556 /* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
557 int
558 gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
559                       struct intel_ring_buffer *signaller,
560                       u32 seqno)
561 {
562         WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
563         return intel_ring_sync(waiter,
564                                signaller,
565                                BCS,
566                                seqno);
567 }
568
569
570
571 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                      \
572 do {                                                                    \
573         intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
574                  PIPE_CONTROL_DEPTH_STALL);                             \
575         intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
576         intel_ring_emit(ring__, 0);                                                     \
577         intel_ring_emit(ring__, 0);                                                     \
578 } while (0)
579
580 static int
581 pc_render_add_request(struct intel_ring_buffer *ring,
582                       u32 *result)
583 {
584         u32 seqno = i915_gem_next_request_seqno(ring);
585         struct pipe_control *pc = ring->private;
586         u32 scratch_addr = pc->gtt_offset + 128;
587         int ret;
588
589         /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
590          * incoherent with writes to memory, i.e. completely fubar,
591          * so we need to use PIPE_NOTIFY instead.
592          *
593          * However, we also need to workaround the qword write
594          * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
595          * memory before requesting an interrupt.
596          */
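        /*
         * 32 dwords: one 4-dword PIPE_CONTROL, six 4-dword
         * PIPE_CONTROL_FLUSH()es into successive scratch cachelines, and a
         * final 4-dword PIPE_CONTROL carrying PIPE_CONTROL_NOTIFY.
         */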
597         ret = intel_ring_begin(ring, 32);
598         if (ret)
599                 return ret;
600
601         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
602                         PIPE_CONTROL_WRITE_FLUSH |
603                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
604         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
605         intel_ring_emit(ring, seqno);
606         intel_ring_emit(ring, 0);
607         PIPE_CONTROL_FLUSH(ring, scratch_addr);
608         scratch_addr += 128; /* write to separate cachelines */
609         PIPE_CONTROL_FLUSH(ring, scratch_addr);
610         scratch_addr += 128;
611         PIPE_CONTROL_FLUSH(ring, scratch_addr);
612         scratch_addr += 128;
613         PIPE_CONTROL_FLUSH(ring, scratch_addr);
614         scratch_addr += 128;
615         PIPE_CONTROL_FLUSH(ring, scratch_addr);
616         scratch_addr += 128;
617         PIPE_CONTROL_FLUSH(ring, scratch_addr);
618         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
619                         PIPE_CONTROL_WRITE_FLUSH |
620                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
621                         PIPE_CONTROL_NOTIFY);
622         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
623         intel_ring_emit(ring, seqno);
624         intel_ring_emit(ring, 0);
625         intel_ring_advance(ring);
626
627         *result = seqno;
628         return 0;
629 }
630
631 static int
632 render_ring_add_request(struct intel_ring_buffer *ring,
633                         u32 *result)
634 {
635         u32 seqno = i915_gem_next_request_seqno(ring);
636         int ret;
637
638         ret = intel_ring_begin(ring, 4);
639         if (ret)
640                 return ret;
641
642         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
643         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
644         intel_ring_emit(ring, seqno);
645         intel_ring_emit(ring, MI_USER_INTERRUPT);
646         intel_ring_advance(ring);
647
648         *result = seqno;
649         return 0;
650 }
651
652 static u32
653 gen6_ring_get_seqno(struct intel_ring_buffer *ring)
654 {
655         struct drm_device *dev = ring->dev;
656
657         /* Workaround to force correct ordering between irq and seqno writes on
658          * ivb (and maybe also on snb) by reading from a CS register (like
659          * ACTHD) before reading the status page. */
660         if (IS_GEN7(dev))
661                 intel_ring_get_active_head(ring);
662         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
663 }
664
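/*
 * ring_get_seqno() below (and gen6_ring_get_seqno() above) return the dword
 * that the MI_STORE_DWORD_INDEX emitted by the add_request paths writes to
 * slot I915_GEM_HWS_INDEX of the hardware status page.
 */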
665 static u32
666 ring_get_seqno(struct intel_ring_buffer *ring)
667 {
668         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
669 }
670
671 static u32
672 pc_render_get_seqno(struct intel_ring_buffer *ring)
673 {
674         struct pipe_control *pc = ring->private;
675         return pc->cpu_page[0];
676 }
677
678 static void
679 ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
680 {
681         dev_priv->gt_irq_mask &= ~mask;
682         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
683         POSTING_READ(GTIMR);
684 }
685
686 static void
687 ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
688 {
689         dev_priv->gt_irq_mask |= mask;
690         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
691         POSTING_READ(GTIMR);
692 }
693
694 static void
695 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
696 {
697         dev_priv->irq_mask &= ~mask;
698         I915_WRITE(IMR, dev_priv->irq_mask);
699         POSTING_READ(IMR);
700 }
701
702 static void
703 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
704 {
705         dev_priv->irq_mask |= mask;
706         I915_WRITE(IMR, dev_priv->irq_mask);
707         POSTING_READ(IMR);
708 }
709
710 static bool
711 render_ring_get_irq(struct intel_ring_buffer *ring)
712 {
713         struct drm_device *dev = ring->dev;
714         drm_i915_private_t *dev_priv = dev->dev_private;
715
716         if (!dev->irq_enabled)
717                 return false;
718
719         spin_lock(&ring->irq_lock);
720         if (ring->irq_refcount++ == 0) {
721                 if (HAS_PCH_SPLIT(dev))
722                         ironlake_enable_irq(dev_priv,
723                                             GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
724                 else
725                         i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
726         }
727         spin_unlock(&ring->irq_lock);
728
729         return true;
730 }
731
732 static void
733 render_ring_put_irq(struct intel_ring_buffer *ring)
734 {
735         struct drm_device *dev = ring->dev;
736         drm_i915_private_t *dev_priv = dev->dev_private;
737
738         spin_lock(&ring->irq_lock);
739         if (--ring->irq_refcount == 0) {
740                 if (HAS_PCH_SPLIT(dev))
741                         ironlake_disable_irq(dev_priv,
742                                              GT_USER_INTERRUPT |
743                                              GT_PIPE_NOTIFY);
744                 else
745                         i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
746         }
747         spin_unlock(&ring->irq_lock);
748 }
749
750 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
751 {
752         struct drm_device *dev = ring->dev;
753         drm_i915_private_t *dev_priv = ring->dev->dev_private;
754         u32 mmio = 0;
755
756         /* The ring status page addresses are no longer next to the rest of
757          * the ring registers as of gen7.
758          */
759         if (IS_GEN7(dev)) {
760                 switch (ring->id) {
761                 case RING_RENDER:
762                         mmio = RENDER_HWS_PGA_GEN7;
763                         break;
764                 case RING_BLT:
765                         mmio = BLT_HWS_PGA_GEN7;
766                         break;
767                 case RING_BSD:
768                         mmio = BSD_HWS_PGA_GEN7;
769                         break;
770                 }
771         } else if (IS_GEN6(ring->dev)) {
772                 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
773         } else {
774                 mmio = RING_HWS_PGA(ring->mmio_base);
775         }
776
777         I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
778         POSTING_READ(mmio);
779 }
780
781 static int
782 bsd_ring_flush(struct intel_ring_buffer *ring,
783                u32     invalidate_domains,
784                u32     flush_domains)
785 {
786         int ret;
787
788         ret = intel_ring_begin(ring, 2);
789         if (ret)
790                 return ret;
791
792         intel_ring_emit(ring, MI_FLUSH);
793         intel_ring_emit(ring, MI_NOOP);
794         intel_ring_advance(ring);
795         return 0;
796 }
797
798 static int
799 ring_add_request(struct intel_ring_buffer *ring,
800                  u32 *result)
801 {
802         u32 seqno;
803         int ret;
804
805         ret = intel_ring_begin(ring, 4);
806         if (ret)
807                 return ret;
808
809         seqno = i915_gem_next_request_seqno(ring);
810
811         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
812         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
813         intel_ring_emit(ring, seqno);
814         intel_ring_emit(ring, MI_USER_INTERRUPT);
815         intel_ring_advance(ring);
816
817         *result = seqno;
818         return 0;
819 }
820
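/*
 * In the gen6 helpers below, rflag is the bit masked in the ring's own IMR
 * register, while gflag is the matching bit in the shared GTIMR, toggled via
 * ironlake_enable_irq()/ironlake_disable_irq().
 */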
821 static bool
822 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
823 {
824         struct drm_device *dev = ring->dev;
825         drm_i915_private_t *dev_priv = dev->dev_private;
826
827         if (!dev->irq_enabled)
828                 return false;
829
830         /* It looks like we need to prevent the gt from suspending while waiting
831          * for a notify irq, otherwise irqs seem to get lost on at least the
832          * blt/bsd rings on ivb. */
833         if (IS_GEN7(dev))
834                 gen6_gt_force_wake_get(dev_priv);
835
836         spin_lock(&ring->irq_lock);
837         if (ring->irq_refcount++ == 0) {
838                 ring->irq_mask &= ~rflag;
839                 I915_WRITE_IMR(ring, ring->irq_mask);
840                 ironlake_enable_irq(dev_priv, gflag);
841         }
842         spin_unlock(&ring->irq_lock);
843
844         return true;
845 }
846
847 static void
848 gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
849 {
850         struct drm_device *dev = ring->dev;
851         drm_i915_private_t *dev_priv = dev->dev_private;
852
853         spin_lock(&ring->irq_lock);
854         if (--ring->irq_refcount == 0) {
855                 ring->irq_mask |= rflag;
856                 I915_WRITE_IMR(ring, ring->irq_mask);
857                 ironlake_disable_irq(dev_priv, gflag);
858         }
859         spin_unlock(&ring->irq_lock);
860
861         if (IS_GEN7(dev))
862                 gen6_gt_force_wake_put(dev_priv);
863 }
864
865 static bool
866 bsd_ring_get_irq(struct intel_ring_buffer *ring)
867 {
868         struct drm_device *dev = ring->dev;
869         drm_i915_private_t *dev_priv = dev->dev_private;
870
871         if (!dev->irq_enabled)
872                 return false;
873
874         spin_lock(&ring->irq_lock);
875         if (ring->irq_refcount++ == 0) {
876                 if (IS_G4X(dev))
877                         i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
878                 else
879                         ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
880         }
881         spin_unlock(&ring->irq_lock);
882
883         return true;
884 }
885 static void
886 bsd_ring_put_irq(struct intel_ring_buffer *ring)
887 {
888         struct drm_device *dev = ring->dev;
889         drm_i915_private_t *dev_priv = dev->dev_private;
890
891         spin_lock(&ring->irq_lock);
892         if (--ring->irq_refcount == 0) {
893                 if (IS_G4X(dev))
894                         i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
895                 else
896                         ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
897         }
898         spin_unlock(&ring->irq_lock);
899 }
900
901 static int
902 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
903 {
904         int ret;
905
906         ret = intel_ring_begin(ring, 2);
907         if (ret)
908                 return ret;
909
910         intel_ring_emit(ring,
911                         MI_BATCH_BUFFER_START | (2 << 6) |
912                         MI_BATCH_NON_SECURE_I965);
913         intel_ring_emit(ring, offset);
914         intel_ring_advance(ring);
915
916         return 0;
917 }
918
919 static int
920 render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
921                                 u32 offset, u32 len)
922 {
923         struct drm_device *dev = ring->dev;
924         int ret;
925
926         if (IS_I830(dev) || IS_845G(dev)) {
927                 ret = intel_ring_begin(ring, 4);
928                 if (ret)
929                         return ret;
930
931                 intel_ring_emit(ring, MI_BATCH_BUFFER);
932                 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
933                 intel_ring_emit(ring, offset + len - 8);
934                 intel_ring_emit(ring, 0);
935         } else {
936                 ret = intel_ring_begin(ring, 2);
937                 if (ret)
938                         return ret;
939
940                 if (INTEL_INFO(dev)->gen >= 4) {
941                         intel_ring_emit(ring,
942                                         MI_BATCH_BUFFER_START | (2 << 6) |
943                                         MI_BATCH_NON_SECURE_I965);
944                         intel_ring_emit(ring, offset);
945                 } else {
946                         intel_ring_emit(ring,
947                                         MI_BATCH_BUFFER_START | (2 << 6));
948                         intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
949                 }
950         }
951         intel_ring_advance(ring);
952
953         return 0;
954 }
955
956 static void cleanup_status_page(struct intel_ring_buffer *ring)
957 {
958         drm_i915_private_t *dev_priv = ring->dev->dev_private;
959         struct drm_i915_gem_object *obj;
960
961         obj = ring->status_page.obj;
962         if (obj == NULL)
963                 return;
964
965         kunmap(obj->pages[0]);
966         i915_gem_object_unpin(obj);
967         drm_gem_object_unreference(&obj->base);
968         ring->status_page.obj = NULL;
969
970         memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
971 }
972
973 static int init_status_page(struct intel_ring_buffer *ring)
974 {
975         struct drm_device *dev = ring->dev;
976         drm_i915_private_t *dev_priv = dev->dev_private;
977         struct drm_i915_gem_object *obj;
978         int ret;
979
980         obj = i915_gem_alloc_object(dev, 4096);
981         if (obj == NULL) {
982                 DRM_ERROR("Failed to allocate status page\n");
983                 ret = -ENOMEM;
984                 goto err;
985         }
986
987         i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
988
989         ret = i915_gem_object_pin(obj, 4096, true);
990         if (ret)
991                 goto err_unref;
993
994         ring->status_page.gfx_addr = obj->gtt_offset;
995         ring->status_page.page_addr = kmap(obj->pages[0]);
996         if (ring->status_page.page_addr == NULL) {
997                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
998                 goto err_unpin;
999         }
1000         ring->status_page.obj = obj;
1001         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1002
1003         intel_ring_setup_status_page(ring);
1004         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1005                         ring->name, ring->status_page.gfx_addr);
1006
1007         return 0;
1008
1009 err_unpin:
1010         i915_gem_object_unpin(obj);
1011 err_unref:
1012         drm_gem_object_unreference(&obj->base);
1013 err:
1014         return ret;
1015 }
1016
1017 int intel_init_ring_buffer(struct drm_device *dev,
1018                            struct intel_ring_buffer *ring)
1019 {
1020         struct drm_i915_gem_object *obj;
1021         int ret;
1022
1023         ring->dev = dev;
1024         INIT_LIST_HEAD(&ring->active_list);
1025         INIT_LIST_HEAD(&ring->request_list);
1026         INIT_LIST_HEAD(&ring->gpu_write_list);
1027
1028         init_waitqueue_head(&ring->irq_queue);
1029         spin_lock_init(&ring->irq_lock);
1030         ring->irq_mask = ~0;
1031
1032         if (I915_NEED_GFX_HWS(dev)) {
1033                 ret = init_status_page(ring);
1034                 if (ret)
1035                         return ret;
1036         }
1037
1038         obj = i915_gem_alloc_object(dev, ring->size);
1039         if (obj == NULL) {
1040                 DRM_ERROR("Failed to allocate ringbuffer\n");
1041                 ret = -ENOMEM;
1042                 goto err_hws;
1043         }
1044
1045         ring->obj = obj;
1046
1047         ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
1048         if (ret)
1049                 goto err_unref;
1050
1051         ret = i915_gem_object_set_to_gtt_domain(obj, true);
1052         if (ret)
1053                 goto err_unpin;
1054
1055         ring->map.size = ring->size;
1056         ring->map.offset = dev->agp->base + obj->gtt_offset;
1057         ring->map.type = 0;
1058         ring->map.flags = 0;
1059         ring->map.mtrr = 0;
1060
1061         drm_core_ioremap_wc(&ring->map, dev);
1062         if (ring->map.handle == NULL) {
1063                 DRM_ERROR("Failed to map ringbuffer.\n");
1064                 ret = -EINVAL;
1065                 goto err_unpin;
1066         }
1067
1068         ring->virtual_start = ring->map.handle;
1069         ret = ring->init(ring);
1070         if (ret)
1071                 goto err_unmap;
1072
1073         /* Workaround an erratum on the i830 which causes a hang if
1074          * the TAIL pointer points to within the last 2 cachelines
1075          * of the buffer.
1076          */
1077         ring->effective_size = ring->size;
1078         if (IS_I830(ring->dev) || IS_845G(ring->dev))
1079                 ring->effective_size -= 128;
1080
1081         return 0;
1082
1083 err_unmap:
1084         drm_core_ioremapfree(&ring->map, dev);
1085 err_unpin:
1086         i915_gem_object_unpin(obj);
1087 err_unref:
1088         drm_gem_object_unreference(&obj->base);
1089         ring->obj = NULL;
1090 err_hws:
1091         cleanup_status_page(ring);
1092         return ret;
1093 }
1094
1095 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1096 {
1097         struct drm_i915_private *dev_priv;
1098         int ret;
1099
1100         if (ring->obj == NULL)
1101                 return;
1102
1103         /* Disable the ring buffer. The ring must be idle at this point */
1104         dev_priv = ring->dev->dev_private;
1105         ret = intel_wait_ring_idle(ring);
1106         if (ret)
1107                 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1108                           ring->name, ret);
1109
1110         I915_WRITE_CTL(ring, 0);
1111
1112         drm_core_ioremapfree(&ring->map, ring->dev);
1113
1114         i915_gem_object_unpin(ring->obj);
1115         drm_gem_object_unreference(&ring->obj->base);
1116         ring->obj = NULL;
1117
1118         if (ring->cleanup)
1119                 ring->cleanup(ring);
1120
1121         cleanup_status_page(ring);
1122 }
1123
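/*
 * intel_wrap_ring_buffer() below pads the remainder of the ring with MI_NOOPs
 * (two dwords, i.e. 8 bytes, per loop iteration) and resets the tail to zero,
 * so that no command sequence ever straddles the end of the buffer.
 */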
1124 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1125 {
1126         unsigned int *virt;
1127         int rem = ring->size - ring->tail;
1128
1129         if (ring->space < rem) {
1130                 int ret = intel_wait_ring_buffer(ring, rem);
1131                 if (ret)
1132                         return ret;
1133         }
1134
1135         virt = (unsigned int *)(ring->virtual_start + ring->tail);
1136         rem /= 8;
1137         while (rem--) {
1138                 *virt++ = MI_NOOP;
1139                 *virt++ = MI_NOOP;
1140         }
1141
1142         ring->tail = 0;
1143         ring->space = ring_space(ring);
1144
1145         return 0;
1146 }
1147
1148 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
1149 {
1150         struct drm_device *dev = ring->dev;
1151         struct drm_i915_private *dev_priv = dev->dev_private;
1152         unsigned long end;
1153
1154         trace_i915_ring_wait_begin(ring);
1155         end = jiffies + 3 * HZ;
1156         do {
1157                 ring->head = I915_READ_HEAD(ring);
1158                 ring->space = ring_space(ring);
1159                 if (ring->space >= n) {
1160                         trace_i915_ring_wait_end(ring);
1161                         return 0;
1162                 }
1163
1164                 if (dev->primary->master) {
1165                         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1166                         if (master_priv->sarea_priv)
1167                                 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1168                 }
1169
1170                 msleep(1);
1171                 if (atomic_read(&dev_priv->mm.wedged))
1172                         return -EAGAIN;
1173         } while (!time_after(jiffies, end));
1174         trace_i915_ring_wait_end(ring);
1175         return -EBUSY;
1176 }
1177
1178 int intel_ring_begin(struct intel_ring_buffer *ring,
1179                      int num_dwords)
1180 {
1181         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1182         int n = 4*num_dwords;
1183         int ret;
1184
1185         if (unlikely(atomic_read(&dev_priv->mm.wedged)))
1186                 return -EIO;
1187
1188         if (unlikely(ring->tail + n > ring->effective_size)) {
1189                 ret = intel_wrap_ring_buffer(ring);
1190                 if (unlikely(ret))
1191                         return ret;
1192         }
1193
1194         if (unlikely(ring->space < n)) {
1195                 ret = intel_wait_ring_buffer(ring, n);
1196                 if (unlikely(ret))
1197                         return ret;
1198         }
1199
1200         ring->space -= n;
1201         return 0;
1202 }
1203
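/*
 * The masking with size - 1 in intel_ring_advance() below relies on the ring
 * size being a power of two; the static ring definitions in this file all use
 * 32 * PAGE_SIZE.
 */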
1204 void intel_ring_advance(struct intel_ring_buffer *ring)
1205 {
1206         ring->tail &= ring->size - 1;
1207         ring->write_tail(ring, ring->tail);
1208 }
1209
1210 static const struct intel_ring_buffer render_ring = {
1211         .name                   = "render ring",
1212         .id                     = RING_RENDER,
1213         .mmio_base              = RENDER_RING_BASE,
1214         .size                   = 32 * PAGE_SIZE,
1215         .init                   = init_render_ring,
1216         .write_tail             = ring_write_tail,
1217         .flush                  = render_ring_flush,
1218         .add_request            = render_ring_add_request,
1219         .get_seqno              = ring_get_seqno,
1220         .irq_get                = render_ring_get_irq,
1221         .irq_put                = render_ring_put_irq,
1222         .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
1223         .cleanup                = render_ring_cleanup,
1224         .sync_to                = render_ring_sync_to,
1225         .semaphore_register     = {MI_SEMAPHORE_SYNC_INVALID,
1226                                    MI_SEMAPHORE_SYNC_RV,
1227                                    MI_SEMAPHORE_SYNC_RB},
1228         .signal_mbox            = {GEN6_VRSYNC, GEN6_BRSYNC},
1229 };
1230
1231 /* ring buffer for bit-stream decoder */
1232
1233 static const struct intel_ring_buffer bsd_ring = {
1234         .name                   = "bsd ring",
1235         .id                     = RING_BSD,
1236         .mmio_base              = BSD_RING_BASE,
1237         .size                   = 32 * PAGE_SIZE,
1238         .init                   = init_ring_common,
1239         .write_tail             = ring_write_tail,
1240         .flush                  = bsd_ring_flush,
1241         .add_request            = ring_add_request,
1242         .get_seqno              = ring_get_seqno,
1243         .irq_get                = bsd_ring_get_irq,
1244         .irq_put                = bsd_ring_put_irq,
1245         .dispatch_execbuffer    = ring_dispatch_execbuffer,
1246 };
1247
1248
1249 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1250                                      u32 value)
1251 {
1252         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1253
1254         /* Every tail move must follow the sequence below */
1255         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1256                 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1257                 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
1258         I915_WRITE(GEN6_BSD_RNCID, 0x0);
1259
1260         if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1261                 GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
1262                 50))
1263                 DRM_ERROR("timed out waiting for IDLE Indicator\n");
1264
1265         I915_WRITE_TAIL(ring, value);
1266         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1267                 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1268                 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
1269 }
1270
1271 static int gen6_ring_flush(struct intel_ring_buffer *ring,
1272                            u32 invalidate, u32 flush)
1273 {
1274         uint32_t cmd;
1275         int ret;
1276
1277         ret = intel_ring_begin(ring, 4);
1278         if (ret)
1279                 return ret;
1280
1281         cmd = MI_FLUSH_DW;
1282         if (invalidate & I915_GEM_GPU_DOMAINS)
1283                 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1284         intel_ring_emit(ring, cmd);
1285         intel_ring_emit(ring, 0);
1286         intel_ring_emit(ring, 0);
1287         intel_ring_emit(ring, MI_NOOP);
1288         intel_ring_advance(ring);
1289         return 0;
1290 }
1291
1292 static int
1293 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1294                               u32 offset, u32 len)
1295 {
1296         int ret;
1297
1298         ret = intel_ring_begin(ring, 2);
1299         if (ret)
1300                 return ret;
1301
1302         intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
1303         /* bit0-7 is the length on GEN6+ */
1304         intel_ring_emit(ring, offset);
1305         intel_ring_advance(ring);
1306
1307         return 0;
1308 }
1309
1310 static bool
1311 gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
1312 {
1313         return gen6_ring_get_irq(ring,
1314                                  GT_USER_INTERRUPT,
1315                                  GEN6_RENDER_USER_INTERRUPT);
1316 }
1317
1318 static void
1319 gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
1320 {
1321         return gen6_ring_put_irq(ring,
1322                                  GT_USER_INTERRUPT,
1323                                  GEN6_RENDER_USER_INTERRUPT);
1324 }
1325
1326 static bool
1327 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1328 {
1329         return gen6_ring_get_irq(ring,
1330                                  GT_GEN6_BSD_USER_INTERRUPT,
1331                                  GEN6_BSD_USER_INTERRUPT);
1332 }
1333
1334 static void
1335 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1336 {
1337         return gen6_ring_put_irq(ring,
1338                                  GT_GEN6_BSD_USER_INTERRUPT,
1339                                  GEN6_BSD_USER_INTERRUPT);
1340 }
1341
1342 /* ring buffer for Video Codec for Gen6+ */
1343 static const struct intel_ring_buffer gen6_bsd_ring = {
1344         .name                   = "gen6 bsd ring",
1345         .id                     = RING_BSD,
1346         .mmio_base              = GEN6_BSD_RING_BASE,
1347         .size                   = 32 * PAGE_SIZE,
1348         .init                   = init_ring_common,
1349         .write_tail             = gen6_bsd_ring_write_tail,
1350         .flush                  = gen6_ring_flush,
1351         .add_request            = gen6_add_request,
1352         .get_seqno              = gen6_ring_get_seqno,
1353         .irq_get                = gen6_bsd_ring_get_irq,
1354         .irq_put                = gen6_bsd_ring_put_irq,
1355         .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
1356         .sync_to                = gen6_bsd_ring_sync_to,
1357         .semaphore_register     = {MI_SEMAPHORE_SYNC_VR,
1358                                    MI_SEMAPHORE_SYNC_INVALID,
1359                                    MI_SEMAPHORE_SYNC_VB},
1360         .signal_mbox            = {GEN6_RVSYNC, GEN6_BVSYNC},
1361 };
1362
1363 /* Blitter support (SandyBridge+) */
1364
1365 static bool
1366 blt_ring_get_irq(struct intel_ring_buffer *ring)
1367 {
1368         return gen6_ring_get_irq(ring,
1369                                  GT_BLT_USER_INTERRUPT,
1370                                  GEN6_BLITTER_USER_INTERRUPT);
1371 }
1372
1373 static void
1374 blt_ring_put_irq(struct intel_ring_buffer *ring)
1375 {
1376         gen6_ring_put_irq(ring,
1377                           GT_BLT_USER_INTERRUPT,
1378                           GEN6_BLITTER_USER_INTERRUPT);
1379 }
1380
1381
1382 /* Workaround for some steppings of SNB:
1383  * each time the BLT engine ring tail is moved,
1384  * the first command parsed from the ring
1385  * must be MI_BATCH_BUFFER_START.
1386  */
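/*
 * blt_ring_init() below implements this by allocating a one-page buffer
 * holding just MI_BATCH_BUFFER_END + MI_NOOP, and blt_ring_begin() prepends a
 * MI_BATCH_BUFFER_START pointing at it ahead of every command sequence.
 */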
1387 #define NEED_BLT_WORKAROUND(dev) \
1388         (IS_GEN6(dev) && (dev->pdev->revision < 8))
1389
1390 static inline struct drm_i915_gem_object *
1391 to_blt_workaround(struct intel_ring_buffer *ring)
1392 {
1393         return ring->private;
1394 }
1395
1396 static int blt_ring_init(struct intel_ring_buffer *ring)
1397 {
1398         if (NEED_BLT_WORKAROUND(ring->dev)) {
1399                 struct drm_i915_gem_object *obj;
1400                 u32 *ptr;
1401                 int ret;
1402
1403                 obj = i915_gem_alloc_object(ring->dev, 4096);
1404                 if (obj == NULL)
1405                         return -ENOMEM;
1406
1407                 ret = i915_gem_object_pin(obj, 4096, true);
1408                 if (ret) {
1409                         drm_gem_object_unreference(&obj->base);
1410                         return ret;
1411                 }
1412
1413                 ptr = kmap(obj->pages[0]);
1414                 *ptr++ = MI_BATCH_BUFFER_END;
1415                 *ptr++ = MI_NOOP;
1416                 kunmap(obj->pages[0]);
1417
1418                 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1419                 if (ret) {
1420                         i915_gem_object_unpin(obj);
1421                         drm_gem_object_unreference(&obj->base);
1422                         return ret;
1423                 }
1424
1425                 ring->private = obj;
1426         }
1427
1428         return init_ring_common(ring);
1429 }
1430
1431 static int blt_ring_begin(struct intel_ring_buffer *ring,
1432                           int num_dwords)
1433 {
1434         if (ring->private) {
1435                 int ret = intel_ring_begin(ring, num_dwords+2);
1436                 if (ret)
1437                         return ret;
1438
1439                 intel_ring_emit(ring, MI_BATCH_BUFFER_START);
1440                 intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
1441
1442                 return 0;
1443         } else
1444                 return intel_ring_begin(ring, 4);
1445 }
1446
1447 static int blt_ring_flush(struct intel_ring_buffer *ring,
1448                           u32 invalidate, u32 flush)
1449 {
1450         uint32_t cmd;
1451         int ret;
1452
1453         ret = blt_ring_begin(ring, 4);
1454         if (ret)
1455                 return ret;
1456
1457         cmd = MI_FLUSH_DW;
1458         if (invalidate & I915_GEM_DOMAIN_RENDER)
1459                 cmd |= MI_INVALIDATE_TLB;
1460         intel_ring_emit(ring, cmd);
1461         intel_ring_emit(ring, 0);
1462         intel_ring_emit(ring, 0);
1463         intel_ring_emit(ring, MI_NOOP);
1464         intel_ring_advance(ring);
1465         return 0;
1466 }
1467
1468 static void blt_ring_cleanup(struct intel_ring_buffer *ring)
1469 {
1470         if (!ring->private)
1471                 return;
1472
1473         i915_gem_object_unpin(ring->private);
1474         drm_gem_object_unreference(ring->private);
1475         ring->private = NULL;
1476 }
1477
1478 static const struct intel_ring_buffer gen6_blt_ring = {
1479         .name                   = "blt ring",
1480         .id                     = RING_BLT,
1481         .mmio_base              = BLT_RING_BASE,
1482         .size                   = 32 * PAGE_SIZE,
1483         .init                   = blt_ring_init,
1484         .write_tail             = ring_write_tail,
1485         .flush                  = blt_ring_flush,
1486         .add_request            = gen6_add_request,
1487         .get_seqno              = gen6_ring_get_seqno,
1488         .irq_get                = blt_ring_get_irq,
1489         .irq_put                = blt_ring_put_irq,
1490         .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
1491         .cleanup                = blt_ring_cleanup,
1492         .sync_to                = gen6_blt_ring_sync_to,
1493         .semaphore_register     = {MI_SEMAPHORE_SYNC_BR,
1494                                    MI_SEMAPHORE_SYNC_BV,
1495                                    MI_SEMAPHORE_SYNC_INVALID},
1496         .signal_mbox            = {GEN6_RBSYNC, GEN6_VBSYNC},
1497 };
1498
1499 int intel_init_render_ring_buffer(struct drm_device *dev)
1500 {
1501         drm_i915_private_t *dev_priv = dev->dev_private;
1502         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1503
1504         *ring = render_ring;
1505         if (INTEL_INFO(dev)->gen >= 6) {
1506                 ring->add_request = gen6_add_request;
1507                 ring->flush = gen6_render_ring_flush;
1508                 ring->irq_get = gen6_render_ring_get_irq;
1509                 ring->irq_put = gen6_render_ring_put_irq;
1510                 ring->get_seqno = gen6_ring_get_seqno;
1511         } else if (IS_GEN5(dev)) {
1512                 ring->add_request = pc_render_add_request;
1513                 ring->get_seqno = pc_render_get_seqno;
1514         }
1515
1516         if (!I915_NEED_GFX_HWS(dev)) {
1517                 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1518                 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1519         }
1520
1521         return intel_init_ring_buffer(dev, ring);
1522 }
1523
1524 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1525 {
1526         drm_i915_private_t *dev_priv = dev->dev_private;
1527         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1528
1529         *ring = render_ring;
1530         if (INTEL_INFO(dev)->gen >= 6) {
1531                 ring->add_request = gen6_add_request;
1532                 ring->irq_get = gen6_render_ring_get_irq;
1533                 ring->irq_put = gen6_render_ring_put_irq;
1534         } else if (IS_GEN5(dev)) {
1535                 ring->add_request = pc_render_add_request;
1536                 ring->get_seqno = pc_render_get_seqno;
1537         }
1538
1539         if (!I915_NEED_GFX_HWS(dev))
1540                 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1541
1542         ring->dev = dev;
1543         INIT_LIST_HEAD(&ring->active_list);
1544         INIT_LIST_HEAD(&ring->request_list);
1545         INIT_LIST_HEAD(&ring->gpu_write_list);
1546
1547         ring->size = size;
1548         ring->effective_size = ring->size;
1549         if (IS_I830(ring->dev))
1550                 ring->effective_size -= 128;
1551
1552         ring->map.offset = start;
1553         ring->map.size = size;
1554         ring->map.type = 0;
1555         ring->map.flags = 0;
1556         ring->map.mtrr = 0;
1557
1558         drm_core_ioremap_wc(&ring->map, dev);
1559         if (ring->map.handle == NULL) {
1560                 DRM_ERROR("can not ioremap virtual address for"
1561                           " ring buffer\n");
1562                 return -ENOMEM;
1563         }
1564
1565         ring->virtual_start = (void __force __iomem *)ring->map.handle;
1566         return 0;
1567 }
1568
1569 int intel_init_bsd_ring_buffer(struct drm_device *dev)
1570 {
1571         drm_i915_private_t *dev_priv = dev->dev_private;
1572         struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1573
1574         if (IS_GEN6(dev) || IS_GEN7(dev))
1575                 *ring = gen6_bsd_ring;
1576         else
1577                 *ring = bsd_ring;
1578
1579         return intel_init_ring_buffer(dev, ring);
1580 }
1581
1582 int intel_init_blt_ring_buffer(struct drm_device *dev)
1583 {
1584         drm_i915_private_t *dev_priv = dev->dev_private;
1585         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1586
1587         *ring = gen6_blt_ring;
1588
1589         return intel_init_ring_buffer(dev, ring);
1590 }