/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#define VMW_DIRTY_DELAY (HZ / 30)

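/*
 * The fbdev path here is double-buffered: drawing lands in a vmalloc'd
 * system-memory shadow, dirty rectangles are accumulated under a spinlock,
 * and a delayed work flushes them into the VRAM-backed buffer object at
 * most about 30 times per second (HZ / 30).
 */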
struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;		/* system-memory shadow of the framebuffer */

	struct vmw_dma_buffer *vmw_bo;	/* VRAM-backed buffer object */
	struct ttm_bo_kmap_obj map;

	u32 pseudo_palette[17];

	unsigned depth;
	unsigned bpp;

	unsigned max_width;
	unsigned max_height;

	void *bo_ptr;
	unsigned bo_size;
	bool bo_iowrite;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};

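/*
 * Only the 16-entry truecolor pseudo palette is supported. fbdev hands us
 * 16-bit channel values; we keep the high byte of each and pack them as
 * 0x00RRGGBB, e.g. red = 0xff00 yields ((0xff00 & 0xff00) << 8) == 0x00ff0000.
 */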
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
		return 1;
	}

	return 0;
}

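/*
 * Mode checking: only 32 bits per pixel is accepted, and the effective
 * depth is inferred from the requested alpha channel. A nonzero
 * transp.length selects depth 32 (ARGB8888), otherwise depth 24 (XRGB8888).
 */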
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    (var->xoffset != 0 || var->yoffset != 0)) {
		DRM_ERROR("Cannot handle panning without display topology\n");
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geometry does not fit in framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					info->fix.line_length,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geometry exceeds available VRAM\n");
		return -EINVAL;
	}

	return 0;
}

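/*
 * Apply the mode. With SVGA_CAP_DISPLAY_TOPOLOGY we also (re)program guest
 * display 0 with the current pan offset and size, which is presumably why
 * fb_pan_display below can simply return 0: the panning takes effect here
 * through the display position registers.
 */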
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
			   info->fix.line_length,
			   par->bpp, par->depth);
	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
		/* TODO check if pitch and offset changes */
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	/* Warn loudly here: if the framebuffer is not at offset zero,
	 * the user can probably not see anything on the screen.
	 */
	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);

	return 0;
}

static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

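/*
 * Dirty code
 *
 * Flushing is a two-step affair: first copy the dirty scanlines from the
 * system-memory shadow into the kmapped VRAM buffer object, then queue an
 * SVGA_CMD_UPDATE FIFO command so the host redraws that region of the screen.
 */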
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	int stride = (info->fix.line_length / 4);
	int *src = (int *)info->screen_base;
	__le32 __iomem *vram_mem = par->bo_ptr;
	unsigned long flags;
	unsigned x, y, w, h;
	int i, k;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	if (vmw_priv->suspended)
		return;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, flags);
		return;
	}
	x = par->dirty.x1;
	y = par->dirty.y1;
	w = min(par->dirty.x2, info->var.xres) - x;
	h = min(par->dirty.y2, info->var.yres) - y;
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
			iowrite32(src[k], vram_mem + k);
	}

#if 0
	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return;
	}

	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(w);
	cmd->body.height = cpu_to_le32(h);
	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}

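/*
 * Marking grows a single bounding rectangle rather than keeping a list of
 * rects; the deferred work is only scheduled when the rectangle was empty,
 * so back-to-back drawing ops coalesce into one flush.
 */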
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	struct fb_info *info = par->vmw_priv->fb_info;
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* If we are active, start the dirty work;
		 * we share the work with the defio system.
		 */
		if (par->dirty.active)
			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}

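/*
 * Deferred-io callback: mmap writers fault on shadow pages, the fbdev core
 * collects those pages, and we convert the touched page span into a
 * full-width scanline range before flushing.
 */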
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);
	}

	vmw_fb_dirty_flush(par);
}

struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};

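/*
 * Draw code
 *
 * The wrappers below render into the system-memory shadow with the generic
 * cfb_* helpers and then mark the touched rectangle dirty.
 */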
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

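/*
 * Bring up code
 *
 * vmw_fb_ops wires the handlers above into the fbdev core; the rest of this
 * file creates the shadow buffer, the VRAM bo and the fb_info itself.
 */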
static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};

static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret;

	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* interruptible? */
	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &ne_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;

	ttm_write_unlock(&vmw_priv->fbdev_master.lock);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
	return ret;
}

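/*
 * vmw_fb_init sets up the whole console path: pick a (still hardcoded)
 * mode, vmalloc the shadow, create and kmap the VRAM bo, fill in fb_info,
 * hook up deferred io and finally register the framebuffer.
 */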
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned initial_width, initial_height;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	/* XXX These shouldn't be hardcoded. */
	initial_width = 800;
	initial_height = 600;

	fb_bpp = 32;
	fb_depth = 24;

	/* XXX Neither should these maximums. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	initial_width = min(fb_width, initial_width);
	initial_height = min(fb_height, initial_height);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

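	/*
	 * Worked example: with fb_max_width and fb_max_height both clamped
	 * to 2048 at 32 bpp, the computation above gives fb_pitch =
	 * 2048 * 32 / 8 = 8192 bytes and fb_size = 8192 * 2048 = 16 MiB,
	 * which is what the shadow buffer and the bo below are sized for.
	 */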
	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	par->vmw_priv = vmw_priv;
	par->depth = fb_depth;
	par->bpp = fb_bpp;
	par->vmw_bo = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vmalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
	if (unlikely(ret != 0))
		goto err_free;

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	if (unlikely(ret != 0))
		goto err_unref;
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	par->bo_size = fb_size;

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* 24 bit depth per default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = par->bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	info->var.xres = initial_width;
	info->var.yres = initial_height;

#if 0
	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#else
	info->pixmap.size = 0;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;
#endif

	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
	ttm_bo_kunmap(&par->map);
err_unref:
	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	struct ttm_buffer_object *bo;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;
	bo = &par->vmw_bo->base;
	par->vmw_bo = NULL;

	/* XXX: is this the right teardown order? */
	fb_deferred_io_cleanup(info);
	unregister_framebuffer(info);

	ttm_bo_kunmap(&par->map);
	ttm_bo_unref(&bo);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}

int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
			 struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	int ret = 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
	ttm_bo_unreserve(bo);

	return ret;
}

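/*
 * The legacy SVGA framebuffer is expected at the start of VRAM (see the
 * SVGA_REG_FB_OFFSET check in vmw_fb_set_par), so the fbdev bo must end up
 * at offset 0: hence the no-evict VRAM placement with lpfn clamped to the
 * bo size, and the WARN_ON below if we did not get offset 0.
 */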
int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	struct drm_mm_node *mm_node;
	int ret = 0;

	ne_placement.lpfn = bo->num_pages;

	/* interruptible? */
	ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		goto err_unlock;

	/* If the bo already sits in VRAM overlapping the start, move it
	 * out first so the validate below can place it at offset 0.
	 */
	mm_node = bo->mem.mm_node;
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    mm_node->start < bo->num_pages)
		(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
				       false, false);

	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);

	/* Could probably BUG_ON() here instead. */
	WARN_ON(bo->offset != 0);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&vmw_priv->active_master->lock);

	return ret;
}

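/*
 * vmw_fb_off is called when fbdev has to let go of the device, e.g. when
 * another master takes over: stop dirty marking, drain the pending flush
 * work, unmap the bo and evict it from VRAM.
 */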
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_scheduled_work();

	par->bo_ptr = NULL;
	ttm_bo_kunmap(&par->map);

	vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);

	return 0;
}

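/*
 * The reverse of vmw_fb_off: stop any overlays, move the bo back to the
 * start of VRAM, remap it, re-enable dirty marking and force a full
 * refresh of the screen.
 */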
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;
	bool dummy;
	int ret;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* we are already active */
	if (par->bo_ptr != NULL)
		return 0;

	/* Make sure that all overlays are stopped when we take over */
	vmw_overlay_stop_all(vmw_priv);

	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("could not move buffer to start of VRAM\n");
		goto err_no_buffer;
	}

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	BUG_ON(ret != 0);
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
	vmw_fb_set_par(info);

	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

	/* If there already was stuff dirty we won't
	 * schedule new work, so let's do it now.
	 */
	schedule_delayed_work(&info->deferred_work, 0);

	return 0;
}