drm/i915: GFX_MODE Flush TLB Invalidate Mode must be '1' for scanline waits
drivers/gpu/drm/i915/intel_ringbuffer.c
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao<haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
        struct drm_i915_gem_object *obj;
        volatile u32 *cpu_page;
        u32 gtt_offset;
};

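/*
 * Free space (in bytes) left in the ring.  A small gap (8 bytes) is kept
 * between tail and head so that a completely full ring is never mistaken
 * for an empty one (head == tail means empty).
 */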
static inline int ring_space(struct intel_ring_buffer *ring)
{
        int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
        if (space < 0)
                space += ring->size;
        return space;
}

static int
render_ring_flush(struct intel_ring_buffer *ring,
                  u32   invalidate_domains,
                  u32   flush_domains)
{
        struct drm_device *dev = ring->dev;
        u32 cmd;
        int ret;

        /*
         * read/write caches:
         *
         * I915_GEM_DOMAIN_RENDER is always invalidated, but is
         * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
         * also flushed at 2d versus 3d pipeline switches.
         *
         * read-only caches:
         *
         * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
         * MI_READ_FLUSH is set, and is always flushed on 965.
         *
         * I915_GEM_DOMAIN_COMMAND may not exist?
         *
         * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
         * invalidated when MI_EXE_FLUSH is set.
         *
         * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
         * invalidated with every MI_FLUSH.
         *
         * TLBs:
         *
         * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
         * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
         * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
         * are flushed at any MI_FLUSH.
         */

        cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        if ((invalidate_domains|flush_domains) &
            I915_GEM_DOMAIN_RENDER)
                cmd &= ~MI_NO_WRITE_FLUSH;
        if (INTEL_INFO(dev)->gen < 4) {
                /*
                 * On the 965, the sampler cache always gets flushed
                 * and this bit is reserved.
                 */
                if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                        cmd |= MI_READ_FLUSH;
        }
        if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                cmd |= MI_EXE_FLUSH;

        if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
            (IS_G4X(dev) || IS_GEN5(dev)))
                cmd |= MI_INVALIDATE_ISP;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
        struct pipe_control *pc = ring->private;
        u32 scratch_addr = pc->gtt_offset + 128;
        int ret;

        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
        intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
                        PIPE_CONTROL_STALL_AT_SCOREBOARD);
        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
        intel_ring_emit(ring, 0); /* low dword */
        intel_ring_emit(ring, 0); /* high dword */
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
        intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
                       u32 invalidate_domains, u32 flush_domains)
{
        u32 flags = 0;
        struct pipe_control *pc = ring->private;
        u32 scratch_addr = pc->gtt_offset + 128;
        int ret;

        /* Force SNB workarounds for PIPE_CONTROL flushes */
        intel_emit_post_sync_nonzero_flush(ring);

        /* Just flush everything.  Experiments have shown that reducing the
         * number of bits based on the write domains has little performance
         * impact.
         */
        flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
        flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
        flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
        flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
        flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
        flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
        flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;

        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
        intel_ring_emit(ring, flags);
        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, 0); /* lower dword */
        intel_ring_emit(ring, 0); /* upper dword */
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
                            u32 value)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        I915_WRITE_TAIL(ring, value);
}

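/*
 * Read back the ring's current execution address (ACTHD).  Gen4+ parts
 * expose a per-ring ACTHD register; older parts only have the single
 * global one.
 */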
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
                        RING_ACTHD(ring->mmio_base) : ACTHD;

        return I915_READ(acthd_reg);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj = ring->obj;
        int ret = 0;
        u32 head;

        if (HAS_FORCE_WAKE(dev))
                gen6_gt_force_wake_get(dev_priv);

        /* Stop the ring if it's running. */
        I915_WRITE_CTL(ring, 0);
        I915_WRITE_HEAD(ring, 0);
        ring->write_tail(ring, 0);

        head = I915_READ_HEAD(ring) & HEAD_ADDR;

        /* G45 ring initialization fails to reset head to zero */
        if (head != 0) {
                DRM_DEBUG_KMS("%s head not reset to zero "
                              "ctl %08x head %08x tail %08x start %08x\n",
                              ring->name,
                              I915_READ_CTL(ring),
                              I915_READ_HEAD(ring),
                              I915_READ_TAIL(ring),
                              I915_READ_START(ring));

                I915_WRITE_HEAD(ring, 0);

                if (I915_READ_HEAD(ring) & HEAD_ADDR) {
                        DRM_ERROR("failed to set %s head to zero "
                                  "ctl %08x head %08x tail %08x start %08x\n",
                                  ring->name,
                                  I915_READ_CTL(ring),
                                  I915_READ_HEAD(ring),
                                  I915_READ_TAIL(ring),
                                  I915_READ_START(ring));
                }
        }

        /* Initialize the ring. This must happen _after_ we've cleared the ring
         * registers with the above sequence (the readback of the HEAD registers
         * also enforces ordering), otherwise the hw might lose the new ring
         * register values. */
        I915_WRITE_START(ring, obj->gtt_offset);
        I915_WRITE_CTL(ring,
                        ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_VALID);

        /* If the head is still not zero, the ring is dead */
        if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
                     I915_READ_START(ring) == obj->gtt_offset &&
                     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
                DRM_ERROR("%s initialization failed "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
                                I915_READ_CTL(ring),
                                I915_READ_HEAD(ring),
                                I915_READ_TAIL(ring),
                                I915_READ_START(ring));
                ret = -EIO;
                goto out;
        }

        if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
                i915_kernel_lost_context(ring->dev);
        else {
                ring->head = I915_READ_HEAD(ring);
                ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
                ring->space = ring_space(ring);
        }

out:
        if (HAS_FORCE_WAKE(dev))
                gen6_gt_force_wake_put(dev_priv);

        return ret;
}

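/*
 * Allocate, pin and map a single scratch page that PIPE_CONTROL commands
 * can write to; the gen5+ render ring uses it for seqno writes and for
 * the workaround flushes.
 */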
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
        struct pipe_control *pc;
        struct drm_i915_gem_object *obj;
        int ret;

        if (ring->private)
                return 0;

        pc = kmalloc(sizeof(*pc), GFP_KERNEL);
        if (!pc)
                return -ENOMEM;

        obj = i915_gem_alloc_object(ring->dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate seqno page\n");
                ret = -ENOMEM;
                goto err;
        }

        i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

        ret = i915_gem_object_pin(obj, 4096, true);
        if (ret)
                goto err_unref;

        pc->gtt_offset = obj->gtt_offset;
        pc->cpu_page = kmap(obj->pages[0]);
        if (pc->cpu_page == NULL)
                goto err_unpin;

        pc->obj = obj;
        ring->private = pc;
        return 0;

err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(&obj->base);
err:
        kfree(pc);
        return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
        struct pipe_control *pc = ring->private;
        struct drm_i915_gem_object *obj;

        if (!ring->private)
                return;

        obj = pc->obj;
        kunmap(obj->pages[0]);
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(&obj->base);

        kfree(pc);
        ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = init_ring_common(ring);

        if (INTEL_INFO(dev)->gen > 3) {
                int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
                I915_WRITE(MI_MODE, mode);
        }

        /* We need to disable the AsyncFlip performance optimisations in order
         * to use MI_WAIT_FOR_EVENT within the CS. It should already be
         * programmed to '1' on all products.
         */
        if (INTEL_INFO(dev)->gen >= 6)
                I915_WRITE(MI_MODE, GFX_MODE_ENABLE(ASYNC_FLIP_PERF_DISABLE));

        /* Required for the hardware to program scanline values for waiting */
        if (INTEL_INFO(dev)->gen == 6)
                I915_WRITE(GFX_MODE,
                           GFX_MODE_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));

        if (IS_GEN7(dev))
                I915_WRITE(GFX_MODE_GEN7,
                           GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
                           GFX_MODE_ENABLE(GFX_REPLAY_MODE));

        if (INTEL_INFO(dev)->gen >= 5) {
                ret = init_pipe_control(ring);
                if (ret)
                        return ret;
        }

        if (IS_GEN6(dev)) {
                /* From the Sandybridge PRM, volume 1 part 3, page 24:
                 * "If this bit is set, STCunit will have LRA as replacement
                 *  policy. [...] This bit must be reset.  LRA replacement
                 *  policy is not supported."
                 */
                I915_WRITE(CACHE_MODE_0,
                           CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
        }

        if (INTEL_INFO(dev)->gen >= 6) {
                I915_WRITE(INSTPM,
                           INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
        }

        return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
        if (!ring->private)
                return;

        cleanup_pipe_control(ring);
}

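/*
 * Write the given seqno into another ring's semaphore mailbox register
 * (MI_SEMAPHORE_MBOX with the UPDATE bit), so that the other ring can
 * later wait on it with a COMPARE.
 */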
static void
update_mboxes(struct intel_ring_buffer *ring,
              u32 seqno,
              u32 mmio_offset)
{
        intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
                              MI_SEMAPHORE_GLOBAL_GTT |
                              MI_SEMAPHORE_REGISTER |
                              MI_SEMAPHORE_UPDATE);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, mmio_offset);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring: ring that is adding a request
 * @seqno: returns the seqno written into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring,
                 u32 *seqno)
{
        u32 mbox1_reg;
        u32 mbox2_reg;
        int ret;

        ret = intel_ring_begin(ring, 10);
        if (ret)
                return ret;

        mbox1_reg = ring->signal_mbox[0];
        mbox2_reg = ring->signal_mbox[1];

        *seqno = i915_gem_next_request_seqno(ring);

        update_mboxes(ring, *seqno, mbox1_reg);
        update_mboxes(ring, *seqno, mbox2_reg);
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, *seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);

        return 0;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter: ring that is waiting
 * @signaller: ring which has, or will, signal the seqno
 * @seqno: seqno which the waiter will block on
 */
static int
intel_ring_sync(struct intel_ring_buffer *waiter,
                struct intel_ring_buffer *signaller,
                int ring,
                u32 seqno)
{
        int ret;
        u32 dw1 = MI_SEMAPHORE_MBOX |
                  MI_SEMAPHORE_COMPARE |
                  MI_SEMAPHORE_REGISTER;

        ret = intel_ring_begin(waiter, 4);
        if (ret)
                return ret;

        intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
        intel_ring_emit(waiter, seqno);
        intel_ring_emit(waiter, 0);
        intel_ring_emit(waiter, MI_NOOP);
        intel_ring_advance(waiter);

        return 0;
}

/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
int
render_ring_sync_to(struct intel_ring_buffer *waiter,
                    struct intel_ring_buffer *signaller,
                    u32 seqno)
{
        WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
        return intel_ring_sync(waiter,
                               signaller,
                               RCS,
                               seqno);
}

/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
int
gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
                      struct intel_ring_buffer *signaller,
                      u32 seqno)
{
        WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
        return intel_ring_sync(waiter,
                               signaller,
                               VCS,
                               seqno);
}

/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
int
gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
                      struct intel_ring_buffer *signaller,
                      u32 seqno)
{
        WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
        return intel_ring_sync(waiter,
                               signaller,
                               BCS,
                               seqno);
}

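/*
 * Emit one PIPE_CONTROL qword write to the given scratch address; used
 * below to push the queued PIPE_NOTIFY writes all the way out to memory.
 */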
#define PIPE_CONTROL_FLUSH(ring__, addr__)                                      \
do {                                                                    \
        intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
                 PIPE_CONTROL_DEPTH_STALL);                             \
        intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
        intel_ring_emit(ring__, 0);                                                     \
        intel_ring_emit(ring__, 0);                                                     \
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
                      u32 *result)
{
        u32 seqno = i915_gem_next_request_seqno(ring);
        struct pipe_control *pc = ring->private;
        u32 scratch_addr = pc->gtt_offset + 128;
        int ret;

        /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
         * incoherent with writes to memory, i.e. completely fubar,
         * so we need to use PIPE_NOTIFY instead.
         *
         * However, we also need to workaround the qword write
         * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
         * memory before requesting an interrupt.
         */
        ret = intel_ring_begin(ring, 32);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
        intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, 0);
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128; /* write to separate cachelines */
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                        PIPE_CONTROL_NOTIFY);
        intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);

        *result = seqno;
        return 0;
}

static int
render_ring_add_request(struct intel_ring_buffer *ring,
                        u32 *result)
{
        u32 seqno = i915_gem_next_request_seqno(ring);
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);

        *result = seqno;
        return 0;
}

static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;

        /* Workaround to force correct ordering between irq and seqno writes on
         * ivb (and maybe also on snb) by reading from a CS register (like
         * ACTHD) before reading the status page. */
        if (IS_GEN7(dev))
                intel_ring_get_active_head(ring);
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
        struct pipe_control *pc = ring->private;
        return pc->cpu_page[0];
}

static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        dev_priv->gt_irq_mask &= ~mask;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        POSTING_READ(GTIMR);
}

static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        dev_priv->gt_irq_mask |= mask;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        POSTING_READ(GTIMR);
}

static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        dev_priv->irq_mask &= ~mask;
        I915_WRITE(IMR, dev_priv->irq_mask);
        POSTING_READ(IMR);
}

static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        dev_priv->irq_mask |= mask;
        I915_WRITE(IMR, dev_priv->irq_mask);
        POSTING_READ(IMR);
}

static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev->irq_enabled)
                return false;

        spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_enable_irq(dev_priv,
                                            GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
                else
                        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
        }
        spin_unlock(&ring->irq_lock);

        return true;
}

static void
render_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        spin_lock(&ring->irq_lock);
        if (--ring->irq_refcount == 0) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_disable_irq(dev_priv,
                                             GT_USER_INTERRUPT |
                                             GT_PIPE_NOTIFY);
                else
                        i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
        }
        spin_unlock(&ring->irq_lock);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        u32 mmio = 0;

        /* The ring status page addresses are no longer next to the rest of
         * the ring registers as of gen7.
         */
        if (IS_GEN7(dev)) {
                switch (ring->id) {
                case RING_RENDER:
                        mmio = RENDER_HWS_PGA_GEN7;
                        break;
                case RING_BLT:
                        mmio = BLT_HWS_PGA_GEN7;
                        break;
                case RING_BSD:
                        mmio = BSD_HWS_PGA_GEN7;
                        break;
                }
        } else if (IS_GEN6(ring->dev)) {
                mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
        } else {
                mmio = RING_HWS_PGA(ring->mmio_base);
        }

        I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
        POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
               u32     invalidate_domains,
               u32     flush_domains)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_FLUSH);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
        return 0;
}

static int
ring_add_request(struct intel_ring_buffer *ring,
                 u32 *result)
{
        u32 seqno;
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        seqno = i915_gem_next_request_seqno(ring);

        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);

        *result = seqno;
        return 0;
}

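/*
 * Shared gen6+ irq helpers: gflag is the bit in GTIMR, rflag the bit in
 * the ring's own IMR register; both must be unmasked for the interrupt
 * to be delivered.
 */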
static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev->irq_enabled)
                return false;

        /* It looks like we need to prevent the gt from suspending while waiting
         * for a notify irq, otherwise irqs seem to get lost on at least the
         * blt/bsd rings on ivb. */
        if (IS_GEN7(dev))
                gen6_gt_force_wake_get(dev_priv);

        spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0) {
                ring->irq_mask &= ~rflag;
                I915_WRITE_IMR(ring, ring->irq_mask);
                ironlake_enable_irq(dev_priv, gflag);
        }
        spin_unlock(&ring->irq_lock);

        return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        spin_lock(&ring->irq_lock);
        if (--ring->irq_refcount == 0) {
                ring->irq_mask |= rflag;
                I915_WRITE_IMR(ring, ring->irq_mask);
                ironlake_disable_irq(dev_priv, gflag);
        }
        spin_unlock(&ring->irq_lock);

        if (IS_GEN7(dev))
                gen6_gt_force_wake_put(dev_priv);
}

static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev->irq_enabled)
                return false;

        spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0) {
                if (IS_G4X(dev))
                        i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
                else
                        ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
        }
        spin_unlock(&ring->irq_lock);

        return true;
}

static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;

        spin_lock(&ring->irq_lock);
        if (--ring->irq_refcount == 0) {
                if (IS_G4X(dev))
                        i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
                else
                        ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
        }
        spin_unlock(&ring->irq_lock);
}

static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring,
                        MI_BATCH_BUFFER_START | (2 << 6) |
                        MI_BATCH_NON_SECURE_I965);
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);

        return 0;
}

static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
                                u32 offset, u32 len)
{
        struct drm_device *dev = ring->dev;
        int ret;

        if (IS_I830(dev) || IS_845G(dev)) {
                ret = intel_ring_begin(ring, 4);
                if (ret)
                        return ret;

                intel_ring_emit(ring, MI_BATCH_BUFFER);
                intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
                intel_ring_emit(ring, offset + len - 8);
                intel_ring_emit(ring, 0);
        } else {
                ret = intel_ring_begin(ring, 2);
                if (ret)
                        return ret;

                if (INTEL_INFO(dev)->gen >= 4) {
                        intel_ring_emit(ring,
                                        MI_BATCH_BUFFER_START | (2 << 6) |
                                        MI_BATCH_NON_SECURE_I965);
                        intel_ring_emit(ring, offset);
                } else {
                        intel_ring_emit(ring,
                                        MI_BATCH_BUFFER_START | (2 << 6));
                        intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
                }
        }
        intel_ring_advance(ring);

        return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_object *obj;

        obj = ring->status_page.obj;
        if (obj == NULL)
                return;

        kunmap(obj->pages[0]);
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(&obj->base);
        ring->status_page.obj = NULL;

        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

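/*
 * Allocate, pin and kmap a page for the hardware status page (HWS); the
 * GPU writes seqnos and other status there and the CPU reads them back
 * via intel_read_status_page().
 */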
static int init_status_page(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        int ret;

        obj = i915_gem_alloc_object(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate status page\n");
                ret = -ENOMEM;
                goto err;
        }

        i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

        ret = i915_gem_object_pin(obj, 4096, true);
        if (ret != 0) {
                goto err_unref;
        }

        ring->status_page.gfx_addr = obj->gtt_offset;
        ring->status_page.page_addr = kmap(obj->pages[0]);
        if (ring->status_page.page_addr == NULL) {
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
                goto err_unpin;
        }
        ring->status_page.obj = obj;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);

        intel_ring_setup_status_page(ring);
        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                        ring->name, ring->status_page.gfx_addr);

        return 0;

err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(&obj->base);
err:
        return ret;
}

int intel_init_ring_buffer(struct drm_device *dev,
                           struct intel_ring_buffer *ring)
{
        struct drm_i915_gem_object *obj;
        int ret;

        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        INIT_LIST_HEAD(&ring->gpu_write_list);

        init_waitqueue_head(&ring->irq_queue);
        spin_lock_init(&ring->irq_lock);
        ring->irq_mask = ~0;

        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(ring);
                if (ret)
                        return ret;
        }

        obj = i915_gem_alloc_object(dev, ring->size);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
                ret = -ENOMEM;
                goto err_hws;
        }

        ring->obj = obj;

        ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
        if (ret)
                goto err_unref;

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                goto err_unpin;

        ring->map.size = ring->size;
        ring->map.offset = dev->agp->base + obj->gtt_offset;
        ring->map.type = 0;
        ring->map.flags = 0;
        ring->map.mtrr = 0;

        drm_core_ioremap_wc(&ring->map, dev);
        if (ring->map.handle == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
                ret = -EINVAL;
                goto err_unpin;
        }

        ring->virtual_start = ring->map.handle;
        ret = ring->init(ring);
        if (ret)
                goto err_unmap;

        /* Workaround an erratum on the i830 which causes a hang if
         * the TAIL pointer points to within the last 2 cachelines
         * of the buffer.
         */
        ring->effective_size = ring->size;
        if (IS_I830(ring->dev) || IS_845G(ring->dev))
                ring->effective_size -= 128;

        return 0;

err_unmap:
        drm_core_ioremapfree(&ring->map, dev);
err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(&obj->base);
        ring->obj = NULL;
err_hws:
        cleanup_status_page(ring);
        return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv;
        int ret;

        if (ring->obj == NULL)
                return;

        /* Disable the ring buffer. The ring must be idle at this point */
        dev_priv = ring->dev->dev_private;
        ret = intel_wait_ring_idle(ring);
        if (ret)
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                          ring->name, ret);

        I915_WRITE_CTL(ring, 0);

        drm_core_ioremapfree(&ring->map, ring->dev);

        i915_gem_object_unpin(ring->obj);
        drm_gem_object_unreference(&ring->obj->base);
        ring->obj = NULL;

        if (ring->cleanup)
                ring->cleanup(ring);

        cleanup_status_page(ring);
}

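/*
 * Fill the space between the current tail and the end of the buffer with
 * MI_NOOPs so that the next commands start again at offset 0 and never
 * wrap in the middle of an instruction.
 */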
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
        unsigned int *virt;
        int rem = ring->size - ring->tail;

        if (ring->space < rem) {
                int ret = intel_wait_ring_buffer(ring, rem);
                if (ret)
                        return ret;
        }

        virt = (unsigned int *)(ring->virtual_start + ring->tail);
        rem /= 8;
        while (rem--) {
                *virt++ = MI_NOOP;
                *virt++ = MI_NOOP;
        }

        ring->tail = 0;
        ring->space = ring_space(ring);

        return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long end;

        trace_i915_ring_wait_begin(ring);
        end = jiffies + 3 * HZ;
        do {
                ring->head = I915_READ_HEAD(ring);
                ring->space = ring_space(ring);
                if (ring->space >= n) {
                        trace_i915_ring_wait_end(ring);
                        return 0;
                }

                if (dev->primary->master) {
                        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
                        if (master_priv->sarea_priv)
                                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
                }

                msleep(1);
                if (atomic_read(&dev_priv->mm.wedged))
                        return -EAGAIN;
        } while (!time_after(jiffies, end));
        trace_i915_ring_wait_end(ring);
        return -EBUSY;
}

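/*
 * Reserve space for num_dwords commands (4 bytes each), waiting for the
 * GPU to drain the ring if necessary.  Callers in this file all follow
 * the same pattern, e.g. bsd_ring_flush() above:
 *
 *      ret = intel_ring_begin(ring, 2);
 *      if (ret)
 *              return ret;
 *      intel_ring_emit(ring, MI_FLUSH);
 *      intel_ring_emit(ring, MI_NOOP);
 *      intel_ring_advance(ring);
 */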
int intel_ring_begin(struct intel_ring_buffer *ring,
                     int num_dwords)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        int n = 4*num_dwords;
        int ret;

        if (unlikely(atomic_read(&dev_priv->mm.wedged)))
                return -EIO;

        if (unlikely(ring->tail + n > ring->effective_size)) {
                ret = intel_wrap_ring_buffer(ring);
                if (unlikely(ret))
                        return ret;
        }

        if (unlikely(ring->space < n)) {
                ret = intel_wait_ring_buffer(ring, n);
                if (unlikely(ret))
                        return ret;
        }

        ring->space -= n;
        return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
        ring->tail &= ring->size - 1;
        ring->write_tail(ring, ring->tail);
}

static const struct intel_ring_buffer render_ring = {
        .name                   = "render ring",
        .id                     = RING_RENDER,
        .mmio_base              = RENDER_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
        .init                   = init_render_ring,
        .write_tail             = ring_write_tail,
        .flush                  = render_ring_flush,
        .add_request            = render_ring_add_request,
        .get_seqno              = ring_get_seqno,
        .irq_get                = render_ring_get_irq,
        .irq_put                = render_ring_put_irq,
        .dispatch_execbuffer    = render_ring_dispatch_execbuffer,
        .cleanup                = render_ring_cleanup,
        .sync_to                = render_ring_sync_to,
        .semaphore_register     = {MI_SEMAPHORE_SYNC_INVALID,
                                   MI_SEMAPHORE_SYNC_RV,
                                   MI_SEMAPHORE_SYNC_RB},
        .signal_mbox            = {GEN6_VRSYNC, GEN6_BRSYNC},
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
        .name                   = "bsd ring",
        .id                     = RING_BSD,
        .mmio_base              = BSD_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
        .init                   = init_ring_common,
        .write_tail             = ring_write_tail,
        .flush                  = bsd_ring_flush,
        .add_request            = ring_add_request,
        .get_seqno              = ring_get_seqno,
        .irq_get                = bsd_ring_get_irq,
        .irq_put                = bsd_ring_put_irq,
        .dispatch_execbuffer    = ring_dispatch_execbuffer,
};

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
                                     u32 value)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;

        /* Every tail move must follow the sequence below */
        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
        I915_WRITE(GEN6_BSD_RNCID, 0x0);

        if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
                GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
                50))
                DRM_ERROR("timed out waiting for IDLE Indicator\n");

        I915_WRITE_TAIL(ring, value);
        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
                GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

static int gen6_ring_flush(struct intel_ring_buffer *ring,
                           u32 invalidate, u32 flush)
{
        uint32_t cmd;
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        cmd = MI_FLUSH_DW;
        if (invalidate & I915_GEM_GPU_DOMAINS)
                cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
        return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
                              u32 offset, u32 len)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
        /* bit0-7 is the length on GEN6+ */
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);

        return 0;
}

static bool
gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
{
        return gen6_ring_get_irq(ring,
                                 GT_USER_INTERRUPT,
                                 GEN6_RENDER_USER_INTERRUPT);
}

static void
gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
{
        return gen6_ring_put_irq(ring,
                                 GT_USER_INTERRUPT,
                                 GEN6_RENDER_USER_INTERRUPT);
}

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
        return gen6_ring_get_irq(ring,
                                 GT_GEN6_BSD_USER_INTERRUPT,
                                 GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
        return gen6_ring_put_irq(ring,
                                 GT_GEN6_BSD_USER_INTERRUPT,
                                 GEN6_BSD_USER_INTERRUPT);
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
        .name                   = "gen6 bsd ring",
        .id                     = RING_BSD,
        .mmio_base              = GEN6_BSD_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
        .init                   = init_ring_common,
        .write_tail             = gen6_bsd_ring_write_tail,
        .flush                  = gen6_ring_flush,
        .add_request            = gen6_add_request,
        .get_seqno              = gen6_ring_get_seqno,
        .irq_get                = gen6_bsd_ring_get_irq,
        .irq_put                = gen6_bsd_ring_put_irq,
        .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
        .sync_to                = gen6_bsd_ring_sync_to,
        .semaphore_register     = {MI_SEMAPHORE_SYNC_VR,
                                   MI_SEMAPHORE_SYNC_INVALID,
                                   MI_SEMAPHORE_SYNC_VB},
        .signal_mbox            = {GEN6_RVSYNC, GEN6_BVSYNC},
};

/* Blitter support (SandyBridge+) */

static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
        return gen6_ring_get_irq(ring,
                                 GT_BLT_USER_INTERRUPT,
                                 GEN6_BLITTER_USER_INTERRUPT);
}

static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
        gen6_ring_put_irq(ring,
                          GT_BLT_USER_INTERRUPT,
                          GEN6_BLITTER_USER_INTERRUPT);
}

/* Workaround for some steppings of SNB: each time the BLT engine's ring
 * tail is moved, the first command parsed from the ring must be
 * MI_BATCH_BUFFER_START.
 */
#define NEED_BLT_WORKAROUND(dev) \
        (IS_GEN6(dev) && (dev->pdev->revision < 8))

static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
        return ring->private;
}

static int blt_ring_init(struct intel_ring_buffer *ring)
{
        if (NEED_BLT_WORKAROUND(ring->dev)) {
                struct drm_i915_gem_object *obj;
                u32 *ptr;
                int ret;

                obj = i915_gem_alloc_object(ring->dev, 4096);
                if (obj == NULL)
                        return -ENOMEM;

                ret = i915_gem_object_pin(obj, 4096, true);
                if (ret) {
                        drm_gem_object_unreference(&obj->base);
                        return ret;
                }

                ptr = kmap(obj->pages[0]);
                *ptr++ = MI_BATCH_BUFFER_END;
                *ptr++ = MI_NOOP;
                kunmap(obj->pages[0]);

                ret = i915_gem_object_set_to_gtt_domain(obj, false);
                if (ret) {
                        i915_gem_object_unpin(obj);
                        drm_gem_object_unreference(&obj->base);
                        return ret;
                }

                ring->private = obj;
        }

        return init_ring_common(ring);
}

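/*
 * Reserve space and prepend an MI_BATCH_BUFFER_START pointing at the
 * workaround object, so the parser always sees a batch start first after
 * a tail move (see NEED_BLT_WORKAROUND above).
 */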
static int blt_ring_begin(struct intel_ring_buffer *ring,
                          int num_dwords)
{
        if (ring->private) {
                int ret = intel_ring_begin(ring, num_dwords+2);
                if (ret)
                        return ret;

                intel_ring_emit(ring, MI_BATCH_BUFFER_START);
                intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);

                return 0;
        } else
                return intel_ring_begin(ring, 4);
}

static int blt_ring_flush(struct intel_ring_buffer *ring,
                          u32 invalidate, u32 flush)
{
        uint32_t cmd;
        int ret;

        ret = blt_ring_begin(ring, 4);
        if (ret)
                return ret;

        cmd = MI_FLUSH_DW;
        if (invalidate & I915_GEM_DOMAIN_RENDER)
                cmd |= MI_INVALIDATE_TLB;
        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
        return 0;
}

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
        if (!ring->private)
                return;

        i915_gem_object_unpin(ring->private);
        drm_gem_object_unreference(ring->private);
        ring->private = NULL;
}

static const struct intel_ring_buffer gen6_blt_ring = {
        .name                   = "blt ring",
        .id                     = RING_BLT,
        .mmio_base              = BLT_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
        .init                   = blt_ring_init,
        .write_tail             = ring_write_tail,
        .flush                  = blt_ring_flush,
        .add_request            = gen6_add_request,
        .get_seqno              = gen6_ring_get_seqno,
        .irq_get                = blt_ring_get_irq,
        .irq_put                = blt_ring_put_irq,
        .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
        .cleanup                = blt_ring_cleanup,
        .sync_to                = gen6_blt_ring_sync_to,
        .semaphore_register     = {MI_SEMAPHORE_SYNC_BR,
                                   MI_SEMAPHORE_SYNC_BV,
                                   MI_SEMAPHORE_SYNC_INVALID},
        .signal_mbox            = {GEN6_RBSYNC, GEN6_VBSYNC},
};

int intel_init_render_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

        *ring = render_ring;
        if (INTEL_INFO(dev)->gen >= 6) {
                ring->add_request = gen6_add_request;
                ring->flush = gen6_render_ring_flush;
                ring->irq_get = gen6_render_ring_get_irq;
                ring->irq_put = gen6_render_ring_put_irq;
                ring->get_seqno = gen6_ring_get_seqno;
        } else if (IS_GEN5(dev)) {
                ring->add_request = pc_render_add_request;
                ring->get_seqno = pc_render_get_seqno;
        }

        if (!I915_NEED_GFX_HWS(dev)) {
                ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
                memset(ring->status_page.page_addr, 0, PAGE_SIZE);
        }

        return intel_init_ring_buffer(dev, ring);
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

        *ring = render_ring;
        if (INTEL_INFO(dev)->gen >= 6) {
                ring->add_request = gen6_add_request;
                ring->irq_get = gen6_render_ring_get_irq;
                ring->irq_put = gen6_render_ring_put_irq;
        } else if (IS_GEN5(dev)) {
                ring->add_request = pc_render_add_request;
                ring->get_seqno = pc_render_get_seqno;
        }

        if (!I915_NEED_GFX_HWS(dev))
                ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;

        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        INIT_LIST_HEAD(&ring->gpu_write_list);

        ring->size = size;
        ring->effective_size = ring->size;
        if (IS_I830(ring->dev))
                ring->effective_size -= 128;

        ring->map.offset = start;
        ring->map.size = size;
        ring->map.type = 0;
        ring->map.flags = 0;
        ring->map.mtrr = 0;

        drm_core_ioremap_wc(&ring->map, dev);
        if (ring->map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }

        ring->virtual_start = (void __force __iomem *)ring->map.handle;
        return 0;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

        if (IS_GEN6(dev) || IS_GEN7(dev))
                *ring = gen6_bsd_ring;
        else
                *ring = bsd_ring;

        return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

        *ring = gen6_blt_ring;

        return intel_init_ring_buffer(dev, ring);
}