1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Hai hao <haihao.xiang@intel.com>
27  *
28  */
29
30 #include "drmP.h"
31 #include "drm.h"
32 #include "i915_drv.h"
33 #include "i915_drm.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
37 /*
38  * 965+ support PIPE_CONTROL commands, which provide finer grained control
39  * over cache flushing.
40  */
41 struct pipe_control {
42         struct drm_i915_gem_object *obj;
43         volatile u32 *cpu_page;
44         u32 gtt_offset;
45 };
46
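/*
 * Free space between the hardware HEAD and our software TAIL. The extra
 * 8 bytes keep TAIL from ever catching up to HEAD, presumably so that a
 * completely full ring is never mistaken for an empty one.
 */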
47 static inline int ring_space(struct intel_ring_buffer *ring)
48 {
49         int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
50         if (space < 0)
51                 space += ring->size;
52         return space;
53 }
54
55 static int
56 render_ring_flush(struct intel_ring_buffer *ring,
57                   u32   invalidate_domains,
58                   u32   flush_domains)
59 {
60         struct drm_device *dev = ring->dev;
61         u32 cmd;
62         int ret;
63
64         /*
65          * read/write caches:
66          *
67          * I915_GEM_DOMAIN_RENDER is always invalidated, but is
68          * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
69          * also flushed at 2d versus 3d pipeline switches.
70          *
71          * read-only caches:
72          *
73          * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
74          * MI_READ_FLUSH is set, and is always flushed on 965.
75          *
76          * I915_GEM_DOMAIN_COMMAND may not exist?
77          *
78          * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
79          * invalidated when MI_EXE_FLUSH is set.
80          *
81          * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
82          * invalidated with every MI_FLUSH.
83          *
84          * TLBs:
85          *
86          * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
87          * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
88          * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
89          * are flushed at any MI_FLUSH.
90          */
91
92         cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
93         if ((invalidate_domains|flush_domains) &
94             I915_GEM_DOMAIN_RENDER)
95                 cmd &= ~MI_NO_WRITE_FLUSH;
96         if (INTEL_INFO(dev)->gen < 4) {
97                 /*
98                  * On 965+ the sampler cache is always flushed and this
99                  * bit is reserved, so only set it on older parts.
100                  */
101                 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
102                         cmd |= MI_READ_FLUSH;
103         }
104         if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
105                 cmd |= MI_EXE_FLUSH;
106
107         if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
108             (IS_G4X(dev) || IS_GEN5(dev)))
109                 cmd |= MI_INVALIDATE_ISP;
110
111         ret = intel_ring_begin(ring, 2);
112         if (ret)
113                 return ret;
114
115         intel_ring_emit(ring, cmd);
116         intel_ring_emit(ring, MI_NOOP);
117         intel_ring_advance(ring);
118
119         return 0;
120 }
121
122 /**
123  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
124  * implementing two workarounds on gen6.  From section 1.4.7.1
125  * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
126  *
127  * [DevSNB-C+{W/A}] Before any depth stall flush (including those
128  * produced by non-pipelined state commands), software needs to first
129  * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
130  * 0.
131  *
132  * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
133  * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
134  *
135  * And the workaround for these two requires this workaround first:
136  *
137  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
138  * BEFORE the pipe-control with a post-sync op and no write-cache
139  * flushes.
140  *
141  * And this last workaround is tricky because of the requirements on
142  * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
143  * volume 2 part 1:
144  *
145  *     "1 of the following must also be set:
146  *      - Render Target Cache Flush Enable ([12] of DW1)
147  *      - Depth Cache Flush Enable ([0] of DW1)
148  *      - Stall at Pixel Scoreboard ([1] of DW1)
149  *      - Depth Stall ([13] of DW1)
150  *      - Post-Sync Operation ([13] of DW1)
151  *      - Notify Enable ([8] of DW1)"
152  *
153  * The cache flushes require the workaround flush that triggered this
154  * one, so we can't use it.  Depth stall would trigger the same.
155  * Post-sync nonzero is what triggered this second workaround, so we
156  * can't use that one either.  Notify enable is IRQs, which aren't
157  * really our business.  That leaves only stall at scoreboard.
158  */
159 static int
160 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
161 {
162         struct pipe_control *pc = ring->private;
163         u32 scratch_addr = pc->gtt_offset + 128;
164         int ret;
165
166
167         ret = intel_ring_begin(ring, 6);
168         if (ret)
169                 return ret;
170
171         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
172         intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
173                         PIPE_CONTROL_STALL_AT_SCOREBOARD);
174         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
175         intel_ring_emit(ring, 0); /* low dword */
176         intel_ring_emit(ring, 0); /* high dword */
177         intel_ring_emit(ring, MI_NOOP);
178         intel_ring_advance(ring);
179
180         ret = intel_ring_begin(ring, 6);
181         if (ret)
182                 return ret;
183
184         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
185         intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
186         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
187         intel_ring_emit(ring, 0);
188         intel_ring_emit(ring, 0);
189         intel_ring_emit(ring, MI_NOOP);
190         intel_ring_advance(ring);
191
192         return 0;
193 }
194
195 static int
196 gen6_render_ring_flush(struct intel_ring_buffer *ring,
197                          u32 invalidate_domains, u32 flush_domains)
198 {
199         u32 flags = 0;
200         struct pipe_control *pc = ring->private;
201         u32 scratch_addr = pc->gtt_offset + 128;
202         int ret;
203
204         /* Force SNB workarounds for PIPE_CONTROL flushes */
205         ret = intel_emit_post_sync_nonzero_flush(ring);
            if (ret)
                    return ret;
206
207         /* Just flush everything.  Experiments have shown that reducing the
208          * number of bits based on the write domains has little performance
209          * impact.
210          */
211         flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
212         flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
213         flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
214         flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
215         flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
216         flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
217         flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
218
219         ret = intel_ring_begin(ring, 6);
220         if (ret)
221                 return ret;
222
223         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
224         intel_ring_emit(ring, flags);
225         intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
226         intel_ring_emit(ring, 0); /* lower dword */
227         intel_ring_emit(ring, 0); /* upper dword */
228         intel_ring_emit(ring, MI_NOOP);
229         intel_ring_advance(ring);
230
231         return 0;
232 }
233
234 static void ring_write_tail(struct intel_ring_buffer *ring,
235                             u32 value)
236 {
237         drm_i915_private_t *dev_priv = ring->dev->dev_private;
238         I915_WRITE_TAIL(ring, value);
239 }
240
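/*
 * Read the ring's Active Head (ACTHD) register: gen4+ parts have a
 * per-ring ACTHD relative to the ring's mmio base, older parts only
 * have the single global ACTHD register.
 */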
241 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
242 {
243         drm_i915_private_t *dev_priv = ring->dev->dev_private;
244         u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
245                         RING_ACTHD(ring->mmio_base) : ACTHD;
246
247         return I915_READ(acthd_reg);
248 }
249
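/*
 * Bring a ring up from scratch: stop it, program the start address,
 * force HEAD back to zero (G45 is known not to reset it), enable the
 * ring and wait for the hardware to report it as valid and idle.
 */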
250 static int init_ring_common(struct intel_ring_buffer *ring)
251 {
252         drm_i915_private_t *dev_priv = ring->dev->dev_private;
253         struct drm_i915_gem_object *obj = ring->obj;
254         u32 head;
255
256         /* Stop the ring if it's running. */
257         I915_WRITE_CTL(ring, 0);
258         I915_WRITE_HEAD(ring, 0);
259         ring->write_tail(ring, 0);
260
261         /* Initialize the ring. */
262         I915_WRITE_START(ring, obj->gtt_offset);
263         head = I915_READ_HEAD(ring) & HEAD_ADDR;
264
265         /* G45 ring initialization fails to reset head to zero */
266         if (head != 0) {
267                 DRM_DEBUG_KMS("%s head not reset to zero "
268                               "ctl %08x head %08x tail %08x start %08x\n",
269                               ring->name,
270                               I915_READ_CTL(ring),
271                               I915_READ_HEAD(ring),
272                               I915_READ_TAIL(ring),
273                               I915_READ_START(ring));
274
275                 I915_WRITE_HEAD(ring, 0);
276
277                 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
278                         DRM_ERROR("failed to set %s head to zero "
279                                   "ctl %08x head %08x tail %08x start %08x\n",
280                                   ring->name,
281                                   I915_READ_CTL(ring),
282                                   I915_READ_HEAD(ring),
283                                   I915_READ_TAIL(ring),
284                                   I915_READ_START(ring));
285                 }
286         }
287
288         I915_WRITE_CTL(ring,
289                         ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
290                         | RING_VALID);
291
292         /* If the head is still not zero, the ring is dead */
293         if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
294                      I915_READ_START(ring) == obj->gtt_offset &&
295                      (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
296                 DRM_ERROR("%s initialization failed "
297                                 "ctl %08x head %08x tail %08x start %08x\n",
298                                 ring->name,
299                                 I915_READ_CTL(ring),
300                                 I915_READ_HEAD(ring),
301                                 I915_READ_TAIL(ring),
302                                 I915_READ_START(ring));
303                 return -EIO;
304         }
305
306         if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
307                 i915_kernel_lost_context(ring->dev);
308         else {
309                 ring->head = I915_READ_HEAD(ring);
310                 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
311                 ring->space = ring_space(ring);
312         }
313
314         return 0;
315 }
316
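/*
 * Allocate the per-ring pipe_control scratch/seqno page: a 4KiB,
 * LLC-cached GEM object, pinned into the GTT and kmap()ed so that both
 * the GPU (via pc->gtt_offset) and the CPU (via pc->cpu_page) can
 * reach it. It is used as the write target for PIPE_CONTROL packets.
 */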
317 static int
318 init_pipe_control(struct intel_ring_buffer *ring)
319 {
320         struct pipe_control *pc;
321         struct drm_i915_gem_object *obj;
322         int ret;
323
324         if (ring->private)
325                 return 0;
326
327         pc = kmalloc(sizeof(*pc), GFP_KERNEL);
328         if (!pc)
329                 return -ENOMEM;
330
331         obj = i915_gem_alloc_object(ring->dev, 4096);
332         if (obj == NULL) {
333                 DRM_ERROR("Failed to allocate seqno page\n");
334                 ret = -ENOMEM;
335                 goto err;
336         }
337
338         i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
339
340         ret = i915_gem_object_pin(obj, 4096, true);
341         if (ret)
342                 goto err_unref;
343
344         pc->gtt_offset = obj->gtt_offset;
345         pc->cpu_page = kmap(obj->pages[0]);
346         if (pc->cpu_page == NULL) {
                    ret = -ENOMEM;
347                 goto err_unpin;
            }
348
349         pc->obj = obj;
350         ring->private = pc;
351         return 0;
352
353 err_unpin:
354         i915_gem_object_unpin(obj);
355 err_unref:
356         drm_gem_object_unreference(&obj->base);
357 err:
358         kfree(pc);
359         return ret;
360 }
361
362 static void
363 cleanup_pipe_control(struct intel_ring_buffer *ring)
364 {
365         struct pipe_control *pc = ring->private;
366         struct drm_i915_gem_object *obj;
367
368         if (!ring->private)
369                 return;
370
371         obj = pc->obj;
372         kunmap(obj->pages[0]);
373         i915_gem_object_unpin(obj);
374         drm_gem_object_unreference(&obj->base);
375
376         kfree(pc);
377         ring->private = NULL;
378 }
379
380 static int init_render_ring(struct intel_ring_buffer *ring)
381 {
382         struct drm_device *dev = ring->dev;
383         struct drm_i915_private *dev_priv = dev->dev_private;
384         int ret = init_ring_common(ring);
385
386         if (INTEL_INFO(dev)->gen > 3) {
387                 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
388                 I915_WRITE(MI_MODE, mode);
389                 if (IS_GEN7(dev))
390                         I915_WRITE(GFX_MODE_GEN7,
391                                    GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
392                                    GFX_MODE_ENABLE(GFX_REPLAY_MODE));
393         }
394
395         if (INTEL_INFO(dev)->gen >= 5) {
396                 ret = init_pipe_control(ring);
397                 if (ret)
398                         return ret;
399         }
400
401         if (INTEL_INFO(dev)->gen >= 6) {
402                 I915_WRITE(INSTPM,
403                            INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
404         }
405
406         return ret;
407 }
408
409 static void render_ring_cleanup(struct intel_ring_buffer *ring)
410 {
411         if (!ring->private)
412                 return;
413
414         cleanup_pipe_control(ring);
415 }
416
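/*
 * Emit a semaphore mailbox update: write @seqno into the register at
 * @mmio_offset so that another ring can MI_SEMAPHORE_MBOX-wait on it.
 */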
417 static void
418 update_mboxes(struct intel_ring_buffer *ring,
419             u32 seqno,
420             u32 mmio_offset)
421 {
422         intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
423                               MI_SEMAPHORE_GLOBAL_GTT |
424                               MI_SEMAPHORE_REGISTER |
425                               MI_SEMAPHORE_UPDATE);
426         intel_ring_emit(ring, seqno);
427         intel_ring_emit(ring, mmio_offset);
428 }
429
430 /**
431  * gen6_add_request - Update the semaphore mailbox registers
432  *
433  * @ring: ring that is adding a request
434  * @seqno: returns the seqno written into the ring
435  *
436  * Update the mailbox registers in the *other* rings with the current seqno,
437  * which acts as the signal half of a classic semaphore.
438  */
439 static int
440 gen6_add_request(struct intel_ring_buffer *ring,
441                  u32 *seqno)
442 {
443         u32 mbox1_reg;
444         u32 mbox2_reg;
445         int ret;
446
447         ret = intel_ring_begin(ring, 10);
448         if (ret)
449                 return ret;
450
451         mbox1_reg = ring->signal_mbox[0];
452         mbox2_reg = ring->signal_mbox[1];
453
454         *seqno = i915_gem_next_request_seqno(ring);
455
456         update_mboxes(ring, *seqno, mbox1_reg);
457         update_mboxes(ring, *seqno, mbox2_reg);
458         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
459         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
460         intel_ring_emit(ring, *seqno);
461         intel_ring_emit(ring, MI_USER_INTERRUPT);
462         intel_ring_advance(ring);
463
464         return 0;
465 }
466
467 /**
468  * intel_ring_sync - sync the waiter to the signaller on seqno
469  *
470  * @waiter: ring that is waiting
471  * @signaller: ring which has signalled, or will signal
472  * @seqno: seqno which the waiter will block on
473  */
474 static int
475 intel_ring_sync(struct intel_ring_buffer *waiter,
476                 struct intel_ring_buffer *signaller,
477                 int ring,
478                 u32 seqno)
479 {
480         int ret;
481         u32 dw1 = MI_SEMAPHORE_MBOX |
482                   MI_SEMAPHORE_COMPARE |
483                   MI_SEMAPHORE_REGISTER;
484
485         ret = intel_ring_begin(waiter, 4);
486         if (ret)
487                 return ret;
488
489         intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
490         intel_ring_emit(waiter, seqno);
491         intel_ring_emit(waiter, 0);
492         intel_ring_emit(waiter, MI_NOOP);
493         intel_ring_advance(waiter);
494
495         return 0;
496 }
497
498 /* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
499 int
500 render_ring_sync_to(struct intel_ring_buffer *waiter,
501                     struct intel_ring_buffer *signaller,
502                     u32 seqno)
503 {
504         WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
505         return intel_ring_sync(waiter,
506                                signaller,
507                                RCS,
508                                seqno);
509 }
510
511 /* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
512 int
513 gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
514                       struct intel_ring_buffer *signaller,
515                       u32 seqno)
516 {
517         WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
518         return intel_ring_sync(waiter,
519                                signaller,
520                                VCS,
521                                seqno);
522 }
523
524 /* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
525 int
526 gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
527                       struct intel_ring_buffer *signaller,
528                       u32 seqno)
529 {
530         WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
531         return intel_ring_sync(waiter,
532                                signaller,
533                                BCS,
534                                seqno);
535 }
536
537
538
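/*
 * Emit a depth-stalling PIPE_CONTROL that qword-writes to @addr__.
 * pc_render_add_request() below issues a run of these, each targeting
 * a different cacheline of the scratch page, to flush the PIPE_NOTIFY
 * write buffers out to memory before raising the interrupt.
 */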
539 #define PIPE_CONTROL_FLUSH(ring__, addr__)                                      \
540 do {                                                                    \
541         intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
542                  PIPE_CONTROL_DEPTH_STALL);                             \
543         intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
544         intel_ring_emit(ring__, 0);                                                     \
545         intel_ring_emit(ring__, 0);                                                     \
546 } while (0)
547
548 static int
549 pc_render_add_request(struct intel_ring_buffer *ring,
550                       u32 *result)
551 {
552         u32 seqno = i915_gem_next_request_seqno(ring);
553         struct pipe_control *pc = ring->private;
554         u32 scratch_addr = pc->gtt_offset + 128;
555         int ret;
556
557         /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
558          * incoherent with writes to memory, i.e. completely fubar,
559          * so we need to use PIPE_NOTIFY instead.
560          *
561          * However, we also need to workaround the qword write
562          * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
563          * memory before requesting an interrupt.
564          */
565         ret = intel_ring_begin(ring, 32);
566         if (ret)
567                 return ret;
568
569         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
570                         PIPE_CONTROL_WRITE_FLUSH |
571                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
572         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
573         intel_ring_emit(ring, seqno);
574         intel_ring_emit(ring, 0);
575         PIPE_CONTROL_FLUSH(ring, scratch_addr);
576         scratch_addr += 128; /* write to separate cachelines */
577         PIPE_CONTROL_FLUSH(ring, scratch_addr);
578         scratch_addr += 128;
579         PIPE_CONTROL_FLUSH(ring, scratch_addr);
580         scratch_addr += 128;
581         PIPE_CONTROL_FLUSH(ring, scratch_addr);
582         scratch_addr += 128;
583         PIPE_CONTROL_FLUSH(ring, scratch_addr);
584         scratch_addr += 128;
585         PIPE_CONTROL_FLUSH(ring, scratch_addr);
586
587         intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
588                         PIPE_CONTROL_WRITE_FLUSH |
589                         PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
590                         PIPE_CONTROL_NOTIFY);
591         intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
592         intel_ring_emit(ring, seqno);
593         intel_ring_emit(ring, 0);
594         intel_ring_advance(ring);
595
596         *result = seqno;
597         return 0;
598 }
599
600 static int
601 render_ring_add_request(struct intel_ring_buffer *ring,
602                         u32 *result)
603 {
604         u32 seqno = i915_gem_next_request_seqno(ring);
605         int ret;
606
607         ret = intel_ring_begin(ring, 4);
608         if (ret)
609                 return ret;
610
611         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
612         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
613         intel_ring_emit(ring, seqno);
614         intel_ring_emit(ring, MI_USER_INTERRUPT);
615         intel_ring_advance(ring);
616
617         *result = seqno;
618         return 0;
619 }
620
621 static u32
622 gen6_ring_get_seqno(struct intel_ring_buffer *ring)
623 {
624         struct drm_device *dev = ring->dev;
625
626         /* Workaround to force correct ordering between irq and seqno writes on
627          * ivb (and maybe also on snb) by reading from a CS register (like
628          * ACTHD) before reading the status page. */
629         if (IS_GEN6(dev) || IS_GEN7(dev))
630                 intel_ring_get_active_head(ring);
631         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
632 }
633
634 static u32
635 ring_get_seqno(struct intel_ring_buffer *ring)
636 {
637         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
638 }
639
640 static u32
641 pc_render_get_seqno(struct intel_ring_buffer *ring)
642 {
643         struct pipe_control *pc = ring->private;
644         return pc->cpu_page[0];
645 }
646
647 static void
648 ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
649 {
650         dev_priv->gt_irq_mask &= ~mask;
651         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
652         POSTING_READ(GTIMR);
653 }
654
655 static void
656 ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
657 {
658         dev_priv->gt_irq_mask |= mask;
659         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
660         POSTING_READ(GTIMR);
661 }
662
663 static void
664 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
665 {
666         dev_priv->irq_mask &= ~mask;
667         I915_WRITE(IMR, dev_priv->irq_mask);
668         POSTING_READ(IMR);
669 }
670
671 static void
672 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
673 {
674         dev_priv->irq_mask |= mask;
675         I915_WRITE(IMR, dev_priv->irq_mask);
676         POSTING_READ(IMR);
677 }
678
679 static bool
680 render_ring_get_irq(struct intel_ring_buffer *ring)
681 {
682         struct drm_device *dev = ring->dev;
683         drm_i915_private_t *dev_priv = dev->dev_private;
684
685         if (!dev->irq_enabled)
686                 return false;
687
688         spin_lock(&ring->irq_lock);
689         if (ring->irq_refcount++ == 0) {
690                 if (INTEL_INFO(dev)->gen >= 5)
691                         ironlake_enable_irq(dev_priv,
692                                             GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
693                 else
694                         i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
695         }
696         spin_unlock(&ring->irq_lock);
697
698         return true;
699 }
700
701 static void
702 render_ring_put_irq(struct intel_ring_buffer *ring)
703 {
704         struct drm_device *dev = ring->dev;
705         drm_i915_private_t *dev_priv = dev->dev_private;
706
707         spin_lock(&ring->irq_lock);
708         if (--ring->irq_refcount == 0) {
709                 if (INTEL_INFO(dev)->gen >= 5)
710                         ironlake_disable_irq(dev_priv,
711                                              GT_USER_INTERRUPT |
712                                              GT_PIPE_NOTIFY);
713                 else
714                         i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
715         }
716         spin_unlock(&ring->irq_lock);
717 }
718
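/*
 * Tell the hardware where the ring's status page lives by writing its
 * GTT address into the per-ring HWS_PGA register (whose location moved
 * around between generations, as handled below).
 */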
719 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
720 {
721         struct drm_device *dev = ring->dev;
722         drm_i915_private_t *dev_priv = ring->dev->dev_private;
723         u32 mmio = 0;
724
725         /* The ring status page addresses are no longer next to the rest of
726          * the ring registers as of gen7.
727          */
728         if (IS_GEN7(dev)) {
729                 switch (ring->id) {
730                 case RCS:
731                         mmio = RENDER_HWS_PGA_GEN7;
732                         break;
733                 case BCS:
734                         mmio = BLT_HWS_PGA_GEN7;
735                         break;
736                 case VCS:
737                         mmio = BSD_HWS_PGA_GEN7;
738                         break;
739                 }
740         } else if (IS_GEN6(ring->dev)) {
741                 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
742         } else {
743                 mmio = RING_HWS_PGA(ring->mmio_base);
744         }
745
746         I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
747         POSTING_READ(mmio);
748 }
749
750 static int
751 bsd_ring_flush(struct intel_ring_buffer *ring,
752                u32     invalidate_domains,
753                u32     flush_domains)
754 {
755         int ret;
756
757         ret = intel_ring_begin(ring, 2);
758         if (ret)
759                 return ret;
760
761         intel_ring_emit(ring, MI_FLUSH);
762         intel_ring_emit(ring, MI_NOOP);
763         intel_ring_advance(ring);
764         return 0;
765 }
766
767 static int
768 ring_add_request(struct intel_ring_buffer *ring,
769                  u32 *result)
770 {
771         u32 seqno;
772         int ret;
773
774         ret = intel_ring_begin(ring, 4);
775         if (ret)
776                 return ret;
777
778         seqno = i915_gem_next_request_seqno(ring);
779
780         intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
781         intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
782         intel_ring_emit(ring, seqno);
783         intel_ring_emit(ring, MI_USER_INTERRUPT);
784         intel_ring_advance(ring);
785
786         *result = seqno;
787         return 0;
788 }
789
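/*
 * Gen6+ ring interrupts are refcounted per ring: only the first get and
 * the last put actually touch the interrupt mask registers, and a
 * force-wake reference is held for the lifetime of each irq reference
 * (see the comment below about lost interrupts).
 */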
790 static bool
791 gen6_ring_get_irq(struct intel_ring_buffer *ring)
792 {
793         struct drm_device *dev = ring->dev;
794         drm_i915_private_t *dev_priv = dev->dev_private;
795         u32 mask = ring->irq_enable;
796
797         if (!dev->irq_enabled)
798                 return false;
799
800         /* It looks like we need to prevent the gt from suspending while waiting
801          * for a notify irq, otherwise irqs seem to get lost on at least the
802          * blt/bsd rings on ivb. */
803         gen6_gt_force_wake_get(dev_priv);
804
805         spin_lock(&ring->irq_lock);
806         if (ring->irq_refcount++ == 0) {
807                 ring->irq_mask &= ~mask;
808                 I915_WRITE_IMR(ring, ring->irq_mask);
809                 ironlake_enable_irq(dev_priv, mask);
810         }
811         spin_unlock(&ring->irq_lock);
812
813         return true;
814 }
815
816 static void
817 gen6_ring_put_irq(struct intel_ring_buffer *ring)
818 {
819         struct drm_device *dev = ring->dev;
820         drm_i915_private_t *dev_priv = dev->dev_private;
821         u32 mask = ring->irq_enable;
822
823         spin_lock(&ring->irq_lock);
824         if (--ring->irq_refcount == 0) {
825                 ring->irq_mask |= mask;
826                 I915_WRITE_IMR(ring, ring->irq_mask);
827                 ironlake_disable_irq(dev_priv, mask);
828         }
829         spin_unlock(&ring->irq_lock);
830
831         gen6_gt_force_wake_put(dev_priv);
832 }
833
834 static bool
835 bsd_ring_get_irq(struct intel_ring_buffer *ring)
836 {
837         struct drm_device *dev = ring->dev;
838         drm_i915_private_t *dev_priv = dev->dev_private;
839
840         if (!dev->irq_enabled)
841                 return false;
842
843         spin_lock(&ring->irq_lock);
844         if (ring->irq_refcount++ == 0) {
845                 if (IS_G4X(dev))
846                         i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
847                 else
848                         ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
849         }
850         spin_unlock(&ring->irq_lock);
851
852         return true;
853 }
854 static void
855 bsd_ring_put_irq(struct intel_ring_buffer *ring)
856 {
857         struct drm_device *dev = ring->dev;
858         drm_i915_private_t *dev_priv = dev->dev_private;
859
860         spin_lock(&ring->irq_lock);
861         if (--ring->irq_refcount == 0) {
862                 if (IS_G4X(dev))
863                         i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
864                 else
865                         ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
866         }
867         spin_unlock(&ring->irq_lock);
868 }
869
870 static int
871 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
872 {
873         int ret;
874
875         ret = intel_ring_begin(ring, 2);
876         if (ret)
877                 return ret;
878
879         intel_ring_emit(ring,
880                         MI_BATCH_BUFFER_START | (2 << 6) |
881                         MI_BATCH_NON_SECURE_I965);
882         intel_ring_emit(ring, offset);
883         intel_ring_advance(ring);
884
885         return 0;
886 }
887
888 static int
889 render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
890                                 u32 offset, u32 len)
891 {
892         struct drm_device *dev = ring->dev;
893         int ret;
894
895         if (IS_I830(dev) || IS_845G(dev)) {
896                 ret = intel_ring_begin(ring, 4);
897                 if (ret)
898                         return ret;
899
900                 intel_ring_emit(ring, MI_BATCH_BUFFER);
901                 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
902                 intel_ring_emit(ring, offset + len - 8);
903                 intel_ring_emit(ring, 0);
904         } else {
905                 ret = intel_ring_begin(ring, 2);
906                 if (ret)
907                         return ret;
908
909                 if (INTEL_INFO(dev)->gen >= 4) {
910                         intel_ring_emit(ring,
911                                         MI_BATCH_BUFFER_START | (2 << 6) |
912                                         MI_BATCH_NON_SECURE_I965);
913                         intel_ring_emit(ring, offset);
914                 } else {
915                         intel_ring_emit(ring,
916                                         MI_BATCH_BUFFER_START | (2 << 6));
917                         intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
918                 }
919         }
920         intel_ring_advance(ring);
921
922         return 0;
923 }
924
925 static void cleanup_status_page(struct intel_ring_buffer *ring)
926 {
927         drm_i915_private_t *dev_priv = ring->dev->dev_private;
928         struct drm_i915_gem_object *obj;
929
930         obj = ring->status_page.obj;
931         if (obj == NULL)
932                 return;
933
934         kunmap(obj->pages[0]);
935         i915_gem_object_unpin(obj);
936         drm_gem_object_unreference(&obj->base);
937         ring->status_page.obj = NULL;
938
939         memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
940 }
941
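/*
 * Allocate, pin and kmap() the 4KiB hardware status page and point the
 * ring at it: the GPU writes seqnos (and other status dwords) into this
 * page, and the CPU reads them back via intel_read_status_page().
 */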
942 static int init_status_page(struct intel_ring_buffer *ring)
943 {
944         struct drm_device *dev = ring->dev;
945         drm_i915_private_t *dev_priv = dev->dev_private;
946         struct drm_i915_gem_object *obj;
947         int ret;
948
949         obj = i915_gem_alloc_object(dev, 4096);
950         if (obj == NULL) {
951                 DRM_ERROR("Failed to allocate status page\n");
952                 ret = -ENOMEM;
953                 goto err;
954         }
955
956         i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
957
958         ret = i915_gem_object_pin(obj, 4096, true);
959         if (ret != 0) {
960                 goto err_unref;
961         }
962
963         ring->status_page.gfx_addr = obj->gtt_offset;
964         ring->status_page.page_addr = kmap(obj->pages[0]);
965         if (ring->status_page.page_addr == NULL) {
                    ret = -ENOMEM;
966                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
967                 goto err_unpin;
968         }
969         ring->status_page.obj = obj;
970         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
971
972         intel_ring_setup_status_page(ring);
973         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
974                         ring->name, ring->status_page.gfx_addr);
975
976         return 0;
977
978 err_unpin:
979         i915_gem_object_unpin(obj);
980 err_unref:
981         drm_gem_object_unreference(&obj->base);
982 err:
983         return ret;
984 }
985
986 int intel_init_ring_buffer(struct drm_device *dev,
987                            struct intel_ring_buffer *ring)
988 {
989         struct drm_i915_gem_object *obj;
990         int ret;
991
992         ring->dev = dev;
993         INIT_LIST_HEAD(&ring->active_list);
994         INIT_LIST_HEAD(&ring->request_list);
995         INIT_LIST_HEAD(&ring->gpu_write_list);
996
997         init_waitqueue_head(&ring->irq_queue);
998         spin_lock_init(&ring->irq_lock);
999         ring->irq_mask = ~0;
1000
1001         if (I915_NEED_GFX_HWS(dev)) {
1002                 ret = init_status_page(ring);
1003                 if (ret)
1004                         return ret;
1005         }
1006
1007         obj = i915_gem_alloc_object(dev, ring->size);
1008         if (obj == NULL) {
1009                 DRM_ERROR("Failed to allocate ringbuffer\n");
1010                 ret = -ENOMEM;
1011                 goto err_hws;
1012         }
1013
1014         ring->obj = obj;
1015
1016         ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
1017         if (ret)
1018                 goto err_unref;
1019
1020         ring->map.size = ring->size;
1021         ring->map.offset = dev->agp->base + obj->gtt_offset;
1022         ring->map.type = 0;
1023         ring->map.flags = 0;
1024         ring->map.mtrr = 0;
1025
1026         drm_core_ioremap_wc(&ring->map, dev);
1027         if (ring->map.handle == NULL) {
1028                 DRM_ERROR("Failed to map ringbuffer.\n");
1029                 ret = -EINVAL;
1030                 goto err_unpin;
1031         }
1032
1033         ring->virtual_start = ring->map.handle;
1034         ret = ring->init(ring);
1035         if (ret)
1036                 goto err_unmap;
1037
1038         /* Workaround an erratum on the i830 which causes a hang if
1039          * the TAIL pointer points to within the last 2 cachelines
1040          * of the buffer.
1041          */
1042         ring->effective_size = ring->size;
1043         if (IS_I830(ring->dev))
1044                 ring->effective_size -= 128;
1045
1046         return 0;
1047
1048 err_unmap:
1049         drm_core_ioremapfree(&ring->map, dev);
1050 err_unpin:
1051         i915_gem_object_unpin(obj);
1052 err_unref:
1053         drm_gem_object_unreference(&obj->base);
1054         ring->obj = NULL;
1055 err_hws:
1056         cleanup_status_page(ring);
1057         return ret;
1058 }
1059
1060 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1061 {
1062         struct drm_i915_private *dev_priv;
1063         int ret;
1064
1065         if (ring->obj == NULL)
1066                 return;
1067
1068         /* Disable the ring buffer. The ring must be idle at this point */
1069         dev_priv = ring->dev->dev_private;
1070         ret = intel_wait_ring_idle(ring);
1071         if (ret)
1072                 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1073                           ring->name, ret);
1074
1075         I915_WRITE_CTL(ring, 0);
1076
1077         drm_core_ioremapfree(&ring->map, ring->dev);
1078
1079         i915_gem_object_unpin(ring->obj);
1080         drm_gem_object_unreference(&ring->obj->base);
1081         ring->obj = NULL;
1082
1083         if (ring->cleanup)
1084                 ring->cleanup(ring);
1085
1086         cleanup_status_page(ring);
1087 }
1088
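/*
 * Called when the next command block would not fit before the end of
 * the buffer: wait for that much space to drain if necessary, pad the
 * remainder with MI_NOOPs and wrap TAIL back to the start of the ring.
 */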
1089 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1090 {
1091         unsigned int *virt;
1092         int rem = ring->size - ring->tail;
1093
1094         if (ring->space < rem) {
1095                 int ret = intel_wait_ring_buffer(ring, rem);
1096                 if (ret)
1097                         return ret;
1098         }
1099
1100         virt = (unsigned int *)(ring->virtual_start + ring->tail);
1101         rem /= 8;
1102         while (rem--) {
1103                 *virt++ = MI_NOOP;
1104                 *virt++ = MI_NOOP;
1105         }
1106
1107         ring->tail = 0;
1108         ring->space = ring_space(ring);
1109
1110         return 0;
1111 }
1112
1113 static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1114 {
1115         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1116         bool was_interruptible;
1117         int ret;
1118
1119         /* XXX As we have not yet audited all the paths to check that
1120          * they are ready for ERESTARTSYS from intel_ring_begin, do not
1121          * allow us to be interruptible by a signal.
1122          */
1123         was_interruptible = dev_priv->mm.interruptible;
1124         dev_priv->mm.interruptible = false;
1125
1126         ret = i915_wait_request(ring, seqno, true);
1127
1128         dev_priv->mm.interruptible = was_interruptible;
1129
1130         return ret;
1131 }
1132
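/*
 * Try to make space by completing older work rather than by polling the
 * hardware HEAD: retire anything already finished, then find the oldest
 * outstanding request whose retirement would free at least @n bytes and
 * wait on its seqno.
 */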
1133 static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1134 {
1135         struct drm_i915_gem_request *request;
1136         u32 seqno = 0;
1137         int ret;
1138
1139         i915_gem_retire_requests_ring(ring);
1140
1141         if (ring->last_retired_head != -1) {
1142                 ring->head = ring->last_retired_head;
1143                 ring->last_retired_head = -1;
1144                 ring->space = ring_space(ring);
1145                 if (ring->space >= n)
1146                         return 0;
1147         }
1148
1149         list_for_each_entry(request, &ring->request_list, list) {
1150                 int space;
1151
1152                 if (request->tail == -1)
1153                         continue;
1154
1155                 space = request->tail - (ring->tail + 8);
1156                 if (space < 0)
1157                         space += ring->size;
1158                 if (space >= n) {
1159                         seqno = request->seqno;
1160                         break;
1161                 }
1162
1163                 /* Consume this request in case we need more space than
1164                  * is available and so need to prevent a race between
1165                  * updating last_retired_head and direct reads of
1166                  * I915_RING_HEAD. It also provides a nice sanity check.
1167                  */
1168                 request->tail = -1;
1169         }
1170
1171         if (seqno == 0)
1172                 return -ENOSPC;
1173
1174         ret = intel_ring_wait_seqno(ring, seqno);
1175         if (ret)
1176                 return ret;
1177
1178         if (WARN_ON(ring->last_retired_head == -1))
1179                 return -ENOSPC;
1180
1181         ring->head = ring->last_retired_head;
1182         ring->last_retired_head = -1;
1183         ring->space = ring_space(ring);
1184         if (WARN_ON(ring->space < n))
1185                 return -ENOSPC;
1186
1187         return 0;
1188 }
1189
1190 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
1191 {
1192         struct drm_device *dev = ring->dev;
1193         struct drm_i915_private *dev_priv = dev->dev_private;
1194         unsigned long end;
1195         int ret;
1196
1197         ret = intel_ring_wait_request(ring, n);
1198         if (ret != -ENOSPC)
1199                 return ret;
1200
1201         trace_i915_ring_wait_begin(ring);
1202         if (drm_core_check_feature(dev, DRIVER_GEM))
1203                 /* With GEM the hangcheck timer should kick us out of the loop,
1204                  * leaving it early runs the risk of corrupting GEM state (due
1205                  * to running on almost untested codepaths). But on resume
1206                  * timers don't work yet, so prevent a complete hang in that
1207                  * case by choosing an insanely large timeout. */
1208                 end = jiffies + 60 * HZ;
1209         else
1210                 end = jiffies + 3 * HZ;
1211
1212         do {
1213                 ring->head = I915_READ_HEAD(ring);
1214                 ring->space = ring_space(ring);
1215                 if (ring->space >= n) {
1216                         trace_i915_ring_wait_end(ring);
1217                         return 0;
1218                 }
1219
1220                 if (dev->primary->master) {
1221                         struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1222                         if (master_priv->sarea_priv)
1223                                 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1224                 }
1225
1226                 msleep(1);
1227                 if (atomic_read(&dev_priv->mm.wedged))
1228                         return -EAGAIN;
1229         } while (!time_after(jiffies, end));
1230         trace_i915_ring_wait_end(ring);
1231         return -EBUSY;
1232 }
1233
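/*
 * Reserve space in the ring for @num_dwords dwords, wrapping the buffer
 * and/or waiting for space to drain as needed. The caller then emits
 * exactly that many dwords with intel_ring_emit() and finishes with
 * intel_ring_advance(), e.g. (illustrative only, mirroring
 * bsd_ring_flush() above):
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */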
1234 int intel_ring_begin(struct intel_ring_buffer *ring,
1235                      int num_dwords)
1236 {
1237         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1238         int n = 4*num_dwords;
1239         int ret;
1240
1241         if (unlikely(atomic_read(&dev_priv->mm.wedged)))
1242                 return -EIO;
1243
1244         if (unlikely(ring->tail + n > ring->effective_size)) {
1245                 ret = intel_wrap_ring_buffer(ring);
1246                 if (unlikely(ret))
1247                         return ret;
1248         }
1249
1250         if (unlikely(ring->space < n)) {
1251                 ret = intel_wait_ring_buffer(ring, n);
1252                 if (unlikely(ret))
1253                         return ret;
1254         }
1255
1256         ring->space -= n;
1257         return 0;
1258 }
1259
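/*
 * Publish everything emitted since intel_ring_begin() by writing the
 * (wrapped) software TAIL to the hardware via the ring's write_tail hook.
 */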
1260 void intel_ring_advance(struct intel_ring_buffer *ring)
1261 {
1262         ring->tail &= ring->size - 1;
1263         ring->write_tail(ring, ring->tail);
1264 }
1265
1266 static const struct intel_ring_buffer render_ring = {
1267         .name                   = "render ring",
1268         .id                     = RCS,
1269         .mmio_base              = RENDER_RING_BASE,
1270         .size                   = 32 * PAGE_SIZE,
1271         .init                   = init_render_ring,
1272         .write_tail             = ring_write_tail,
1273         .flush                  = render_ring_flush,
1274         .add_request            = render_ring_add_request,
1275         .get_seqno              = ring_get_seqno,
1276         .irq_get                = render_ring_get_irq,
1277         .irq_put                = render_ring_put_irq,
1278         .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
1279         .cleanup                = render_ring_cleanup,
1280         .sync_to                = render_ring_sync_to,
1281         .semaphore_register     = {MI_SEMAPHORE_SYNC_INVALID,
1282                                    MI_SEMAPHORE_SYNC_RV,
1283                                    MI_SEMAPHORE_SYNC_RB},
1284         .signal_mbox            = {GEN6_VRSYNC, GEN6_BRSYNC},
1285 };
1286
1287 /* ring buffer for bit-stream decoder */
1288
1289 static const struct intel_ring_buffer bsd_ring = {
1290         .name                   = "bsd ring",
1291         .id                     = VCS,
1292         .mmio_base              = BSD_RING_BASE,
1293         .size                   = 32 * PAGE_SIZE,
1294         .init                   = init_ring_common,
1295         .write_tail             = ring_write_tail,
1296         .flush                  = bsd_ring_flush,
1297         .add_request            = ring_add_request,
1298         .get_seqno              = ring_get_seqno,
1299         .irq_get                = bsd_ring_get_irq,
1300         .irq_put                = bsd_ring_put_irq,
1301         .dispatch_execbuffer    = ring_dispatch_execbuffer,
1302 };
1303
1304
1305 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1306                                      u32 value)
1307 {
1308         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1309
1310         /* Every tail move must follow the sequence below */
1311         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1312                 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1313                 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
1314         I915_WRITE(GEN6_BSD_RNCID, 0x0);
1315
1316         if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1317                 GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
1318                 50))
1319                 DRM_ERROR("timed out waiting for IDLE Indicator\n");
1320
1321         I915_WRITE_TAIL(ring, value);
1322         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1323                 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1324                 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
1325 }
1326
1327 static int gen6_ring_flush(struct intel_ring_buffer *ring,
1328                            u32 invalidate, u32 flush)
1329 {
1330         uint32_t cmd;
1331         int ret;
1332
1333         ret = intel_ring_begin(ring, 4);
1334         if (ret)
1335                 return ret;
1336
1337         cmd = MI_FLUSH_DW;
1338         if (invalidate & I915_GEM_GPU_DOMAINS)
1339                 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1340         intel_ring_emit(ring, cmd);
1341         intel_ring_emit(ring, 0);
1342         intel_ring_emit(ring, 0);
1343         intel_ring_emit(ring, MI_NOOP);
1344         intel_ring_advance(ring);
1345         return 0;
1346 }
1347
1348 static int
1349 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1350                               u32 offset, u32 len)
1351 {
1352         int ret;
1353
1354         ret = intel_ring_begin(ring, 2);
1355         if (ret)
1356                 return ret;
1357
1358         intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
1359         /* bit0-7 is the length on GEN6+ */
1360         intel_ring_emit(ring, offset);
1361         intel_ring_advance(ring);
1362
1363         return 0;
1364 }
1365
1366 /* ring buffer for Video Codec for Gen6+ */
1367 static const struct intel_ring_buffer gen6_bsd_ring = {
1368         .name                   = "gen6 bsd ring",
1369         .id                     = VCS,
1370         .mmio_base              = GEN6_BSD_RING_BASE,
1371         .size                   = 32 * PAGE_SIZE,
1372         .init                   = init_ring_common,
1373         .write_tail             = gen6_bsd_ring_write_tail,
1374         .flush                  = gen6_ring_flush,
1375         .add_request            = gen6_add_request,
1376         .get_seqno              = gen6_ring_get_seqno,
1377         .irq_enable             = GEN6_BSD_USER_INTERRUPT,
1378         .irq_get                = gen6_ring_get_irq,
1379         .irq_put                = gen6_ring_put_irq,
1380         .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
1381         .sync_to                = gen6_bsd_ring_sync_to,
1382         .semaphore_register     = {MI_SEMAPHORE_SYNC_VR,
1383                                    MI_SEMAPHORE_SYNC_INVALID,
1384                                    MI_SEMAPHORE_SYNC_VB},
1385         .signal_mbox            = {GEN6_RVSYNC, GEN6_BVSYNC},
1386 };
1387
1388 /* Blitter support (SandyBridge+) */
1389
1390 static int blt_ring_flush(struct intel_ring_buffer *ring,
1391                           u32 invalidate, u32 flush)
1392 {
1393         uint32_t cmd;
1394         int ret;
1395
1396         ret = intel_ring_begin(ring, 4);
1397         if (ret)
1398                 return ret;
1399
1400         cmd = MI_FLUSH_DW;
1401         if (invalidate & I915_GEM_DOMAIN_RENDER)
1402                 cmd |= MI_INVALIDATE_TLB;
1403         intel_ring_emit(ring, cmd);
1404         intel_ring_emit(ring, 0);
1405         intel_ring_emit(ring, 0);
1406         intel_ring_emit(ring, MI_NOOP);
1407         intel_ring_advance(ring);
1408         return 0;
1409 }
1410
1411 static const struct intel_ring_buffer gen6_blt_ring = {
1412         .name                   = "blt ring",
1413         .id                     = BCS,
1414         .mmio_base              = BLT_RING_BASE,
1415         .size                   = 32 * PAGE_SIZE,
1416         .init                   = init_ring_common,
1417         .write_tail             = ring_write_tail,
1418         .flush                  = blt_ring_flush,
1419         .add_request            = gen6_add_request,
1420         .get_seqno              = gen6_ring_get_seqno,
1421         .irq_get                = gen6_ring_get_irq,
1422         .irq_put                = gen6_ring_put_irq,
1423         .irq_enable             = GEN6_BLITTER_USER_INTERRUPT,
1424         .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
1425         .sync_to                = gen6_blt_ring_sync_to,
1426         .semaphore_register     = {MI_SEMAPHORE_SYNC_BR,
1427                                    MI_SEMAPHORE_SYNC_BV,
1428                                    MI_SEMAPHORE_SYNC_INVALID},
1429         .signal_mbox            = {GEN6_RBSYNC, GEN6_VBSYNC},
1430 };
1431
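/*
 * The init functions below copy one of the static ring templates above
 * into dev_priv->ring[], patch in generation-specific hooks, and (apart
 * from the legacy DRI path) hand off to intel_init_ring_buffer().
 */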
1432 int intel_init_render_ring_buffer(struct drm_device *dev)
1433 {
1434         drm_i915_private_t *dev_priv = dev->dev_private;
1435         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1436
1437         *ring = render_ring;
1438         if (INTEL_INFO(dev)->gen >= 6) {
1439                 ring->add_request = gen6_add_request;
1440                 ring->flush = gen6_render_ring_flush;
1441                 ring->irq_get = gen6_ring_get_irq;
1442                 ring->irq_put = gen6_ring_put_irq;
1443                 ring->irq_enable = GT_USER_INTERRUPT;
1444                 ring->get_seqno = gen6_ring_get_seqno;
1445         } else if (IS_GEN5(dev)) {
1446                 ring->add_request = pc_render_add_request;
1447                 ring->get_seqno = pc_render_get_seqno;
1448         }
1449
1450         if (!I915_NEED_GFX_HWS(dev)) {
1451                 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1452                 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1453         }
1454
1455         return intel_init_ring_buffer(dev, ring);
1456 }
1457
1458 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1459 {
1460         drm_i915_private_t *dev_priv = dev->dev_private;
1461         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1462
1463         *ring = render_ring;
1464         if (INTEL_INFO(dev)->gen >= 6) {
1465                 ring->add_request = gen6_add_request;
1466                 ring->irq_get = gen6_ring_get_irq;
1467                 ring->irq_put = gen6_ring_put_irq;
1468                 ring->irq_enable = GT_USER_INTERRUPT;
1469         } else if (IS_GEN5(dev)) {
1470                 ring->add_request = pc_render_add_request;
1471                 ring->get_seqno = pc_render_get_seqno;
1472         }
1473
1474         if (!I915_NEED_GFX_HWS(dev))
1475                 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1476
1477         ring->dev = dev;
1478         INIT_LIST_HEAD(&ring->active_list);
1479         INIT_LIST_HEAD(&ring->request_list);
1480         INIT_LIST_HEAD(&ring->gpu_write_list);
1481
1482         ring->size = size;
1483         ring->effective_size = ring->size;
1484         if (IS_I830(ring->dev))
1485                 ring->effective_size -= 128;
1486
1487         ring->map.offset = start;
1488         ring->map.size = size;
1489         ring->map.type = 0;
1490         ring->map.flags = 0;
1491         ring->map.mtrr = 0;
1492
1493         drm_core_ioremap_wc(&ring->map, dev);
1494         if (ring->map.handle == NULL) {
1495                 DRM_ERROR("can not ioremap virtual address for"
1496                           " ring buffer\n");
1497                 return -ENOMEM;
1498         }
1499
1500         ring->virtual_start = (void __force __iomem *)ring->map.handle;
1501         return 0;
1502 }
1503
1504 int intel_init_bsd_ring_buffer(struct drm_device *dev)
1505 {
1506         drm_i915_private_t *dev_priv = dev->dev_private;
1507         struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1508
1509         if (IS_GEN6(dev) || IS_GEN7(dev))
1510                 *ring = gen6_bsd_ring;
1511         else
1512                 *ring = bsd_ring;
1513
1514         return intel_init_ring_buffer(dev, ring);
1515 }
1516
1517 int intel_init_blt_ring_buffer(struct drm_device *dev)
1518 {
1519         drm_i915_private_t *dev_priv = dev->dev_private;
1520         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1521
1522         *ring = gen6_blt_ring;
1523
1524         return intel_init_ring_buffer(dev, ring);
1525 }