/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>

struct vmw_temp_set_context {
	SVGA3dCmdHeader header;
	SVGA3dCmdDXTempSetContext body;
};

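/**
 * vmw_fifo_have_3d - Check whether the device can accept 3D commands.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * On guest-backed-object devices this queries the 3D devcap; on older
 * devices it checks the FIFO 3D hardware version.
 */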
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (!(dev_priv->capabilities & SVGA_CAP_3D))
		return false;

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint32_t result;

		if (!dev_priv->has_mob)
			return false;

		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);

		return (result != 0);
	}

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	hwversion = ioread32(fifo_mem +
			     ((fifo->capabilities &
			       SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
			      SVGA_FIFO_3D_HWVERSION_REVISED :
			      SVGA_FIFO_3D_HWVERSION));
	if (hwversion == 0)
		return false;
	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
		return false;

	/* Legacy Display Unit does not support surfaces */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return false;

	return true;
}

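/**
 * vmw_fifo_have_pitchlock - Check whether the device FIFO advertises
 * the pitchlock capability.
 *
 * @dev_priv: Pointer to device private structure.
 */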
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t caps;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
		return true;

	return false;
}

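/**
 * vmw_fifo_init - Initialize the FIFO and enable the SVGA device.
 *
 * @dev_priv: Pointer to device private structure.
 * @fifo: The fifo state to initialize.
 *
 * Saves register state to be restored at release time, allocates the
 * static bounce buffer and publishes the FIFO memory layout to the device.
 */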
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;

	fifo->dx = false;
	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
		  SVGA_REG_ENABLE_HIDE);
	vmw_write(dev_priv, SVGA_REG_TRACES, 0);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_marker_queue_init(&fifo->marker_queue);

	return 0;
}

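/**
 * vmw_fifo_ping_host - Notify the host that FIFO commands are pending.
 *
 * @dev_priv: Pointer to device private structure.
 * @reason: SVGA_SYNC_* reason code written to the sync register.
 */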
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	static DEFINE_SPINLOCK(ping_lock);
	unsigned long irq_flags;

	/*
	 * The ping_lock is needed because we don't have an atomic
	 * test-and-set of the SVGA_FIFO_BUSY register.
	 */
	spin_lock_irqsave(&ping_lock, irq_flags);
	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	}
	spin_unlock_irqrestore(&ping_lock, irq_flags);
}

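/**
 * vmw_fifo_release - Drain the FIFO, restore the saved register state
 * and free the bounce buffers.
 *
 * @dev_priv: Pointer to device private structure.
 * @fifo: The fifo state to take down.
 */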
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;

	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		;

	dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);
	vmw_write(dev_priv, SVGA_REG_TRACES,
		  dev_priv->traces_state);

	vmw_marker_queue_takedown(&fifo->marker_queue);

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}

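/**
 * vmw_fifo_is_full - Check whether @bytes of command space would
 * overflow the FIFO free space.
 */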
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

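/**
 * vmw_fifo_wait_noirq - Wait for FIFO space by polling. Used when the
 * device lacks the SVGA_CAP_IRQMASK capability. Returns -EBUSY if
 * @timeout expires before space becomes available.
 */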
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

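/**
 * vmw_fifo_wait - Wait for FIFO space, using FIFO progress interrupts
 * when the device supports them and polling otherwise.
 */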
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;
	unsigned long irq_flags;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	spin_lock(&dev_priv->waiter_lock);
	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	spin_unlock(&dev_priv->waiter_lock);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	spin_lock(&dev_priv->waiter_lock);
	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	spin_unlock(&dev_priv->waiter_lock);

	return ret;
}

/**
 * Reserve @bytes number of bytes in the fifo.
 *
 * This function will return NULL (error) on two conditions:
 * If it times out waiting for fifo space, or if @bytes is larger than the
 * available fifo space.
 *
 * Returns:
 * Pointer to the fifo, or null on error (possible hardware hang).
 */
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
				    uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					iowrite32(bytes, fifo_mem +
						  SVGA_FIFO_RESERVED);
				return (void __force *) (fifo_mem +
							 (next_cmd >> 2));
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				if (!fifo_state->dynamic_buffer)
					goto out_err;
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);

	return NULL;
}

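/**
 * vmw_fifo_reserve_dx - Reserve command space, routing the request to the
 * command buffer manager if one is present.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to reserve.
 * @ctx_id: DX context id, or SVGA3D_INVALID_ID for the plain FIFO path.
 *
 * Returns a pointer to the reserved command space, or NULL on failure.
 */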
void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
			  int ctx_id)
{
	void *ret;

	if (dev_priv->cman)
		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
					 ctx_id, false, NULL);
	else if (ctx_id == SVGA3D_INVALID_ID)
		ret = vmw_local_fifo_reserve(dev_priv, bytes);
	else {
		WARN(1, "Command buffer has not been allocated.\n");
		ret = NULL;
	}
	if (IS_ERR_OR_NULL(ret)) {
		DRM_ERROR("Fifo reserve failure of %u bytes.\n",
			  (unsigned) bytes);
		dump_stack();
		return NULL;
	}

	return ret;
}

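/**
 * vmw_fifo_res_copy - Copy a bounce-buffered command into FIFO memory in
 * up to two chunks, wrapping at the FIFO boundary. Requires the
 * SVGA_FIFO_CAP_RESERVE capability.
 */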
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      u32 __iomem *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
			    rest);
}

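/**
 * vmw_fifo_slow_copy - Copy a bounce-buffered command into FIFO memory
 * one 32-bit word at a time, advancing NEXT_CMD after each word. Used
 * when the device lacks the SVGA_FIFO_CAP_RESERVE capability.
 */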
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       u32 __iomem *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

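/**
 * vmw_local_fifo_commit - Commit previously reserved command space to
 * the device, copying out of the bounce buffer if one was used, and
 * ping the host to start command processing.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 */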
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	if (fifo_state->dx)
		bytes += sizeof(struct vmw_temp_set_context);

	fifo_state->dx = false;
	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}
	}

	down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}

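/**
 * vmw_fifo_commit - Commit command space, routing the commit to the
 * command buffer manager if one is present.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 */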
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 */
void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_fifo_flush - Flush any buffered commands and make sure command
 * processing starts.
 *
 * @dev_priv: Pointer to device private structure.
 * @interruptible: Whether to wait interruptibly if the function needs to sleep.
 */
int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
{
	might_sleep();

	if (dev_priv->cman)
		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
	else
		return 0;
}

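/**
 * vmw_fifo_send_fence - Emit a fence command and return its sequence number.
 *
 * @dev_priv: Pointer to device private structure.
 * @seqno: The assigned sequence number is returned here.
 *
 * Returns -ENOMEM if fifo space could not be reserved, in which case a
 * fallback wait is performed.
 */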
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	u32 *fm;
	int ret = 0;
	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

	fm = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3*HZ);
		goto out_err;
	}

	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*fm++ = SVGA_CMD_FENCE;
	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
	cmd_fence->fence = *seqno;
	vmw_fifo_commit_flush(dev_priv, bytes);
	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
	vmw_update_seqno(dev_priv, fifo_state);

out_err:
	return ret;
}

/**
 * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
					    uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

	if (bo->mem.mem_type == TTM_PL_VRAM) {
		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
		cmd->body.guestResult.offset = bo->offset;
	} else {
		cmd->body.guestResult.gmrId = bo->mem.start;
		cmd->body.guestResult.offset = 0;
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
					uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
	cmd->body.mobid = bo->mem.start;
	cmd->body.offset = 0;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using the
 * appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A query results structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must also be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
			      uint32_t cid)
{
	if (dev_priv->has_mob)
		return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);

	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}

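/**
 * vmw_fifo_reserve - Reserve @bytes of command space without a DX context.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to reserve.
 */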
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
	return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
}