From: Rob Clark <robdclark@gmail.com>
Date: Tue, 3 Mar 2015 20:04:25 +0000 (-0500)
Subject: drm/msm: add support for "stolen" mem
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=072f1f9168ed67d6ddc94bb76b1dfc04795062b4;p=linux-beck.git

drm/msm: add support for "stolen" mem

Add support to use the VRAM carveout (if specified in dtb) for the fbdev
scanout buffer.  This allows drm/msm to take over a bootloader splash-
screen, and avoids the on-screen corruption that results if the kernel
uses, for itself, memory that is still being scanned out.

Signed-off-by: Rob Clark <robdclark@gmail.com>
---

diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b250610e6393..0c38f34066e5 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -182,21 +182,57 @@ static int get_mdp_ver(struct platform_device *pdev)
 	return 4;
 }
 
+#include <linux/of_address.h>
+
 static int msm_init_vram(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
+	unsigned long size = 0;
+	int ret = 0;
+
+#ifdef CONFIG_OF
+	/* In the device-tree world, we could have a 'memory-region'
+	 * phandle, which gives us a link to our "vram".  Allocating
+	 * is all nicely abstracted behind the dma api, but we need
+	 * to know the entire size to allocate it all in one go.  There
+	 * are two cases:
+	 *  1) device with no IOMMU, in which case we need exclusive
+	 *     access to a VRAM carveout big enough for all gpu
+	 *     buffers
+	 *  2) device with IOMMU, but where the bootloader puts up
+	 *     a splash screen.  In this case, the VRAM carveout
+	 *     need only be large enough for fbdev fb.  But we need
+	 *     exclusive access to the buffer to avoid the kernel
+	 *     using those pages for other purposes (which appears
+	 *     as corruption on screen before we have a chance to
+	 *     load and do initial modeset)
+	 */
+	struct device_node *node;
+
+	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
+	if (node) {
+		struct resource r;
+		ret = of_address_to_resource(node, 0, &r);
+		if (ret)
+			return ret;
+		size = r.end - r.start;
+		DRM_INFO("using VRAM carveout: %lx@%08x\n", size, r.start);
+	} else
+#endif
 
 	/* if we have no IOMMU, then we need to use carveout allocator.
 	 * Grab the entire CMA chunk carved out in early startup in
 	 * mach-msm:
 	 */
 	if (!iommu_present(&platform_bus_type)) {
+		DRM_INFO("using %s VRAM carveout\n", vram);
+		size = memparse(vram, NULL);
+	}
+
+	if (size) {
 		DEFINE_DMA_ATTRS(attrs);
-		unsigned long size;
 		void *p;
 
-		DBG("using %s VRAM carveout", vram);
-		size = memparse(vram, NULL);
 		priv->vram.size = size;
 
 		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
@@ -220,7 +256,7 @@ static int msm_init_vram(struct drm_device *dev)
 			(uint32_t)(priv->vram.paddr + size));
 	}
 
-	return 0;
+	return ret;
 }
 
 static int msm_load(struct drm_device *dev, unsigned long flags)
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index df60f65728ff..95f6532df02d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -110,7 +110,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	size = mode_cmd.pitches[0] * mode_cmd.height;
 	DBG("allocating %d bytes for fb %d", size, dev->primary->index);
 	mutex_lock(&dev->struct_mutex);
-	fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
+	fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
+			MSM_BO_WC | MSM_BO_STOLEN);
 	mutex_unlock(&dev->struct_mutex);
 	if (IS_ERR(fbdev->bo)) {
 		ret = PTR_ERR(fbdev->bo);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 49dea4fb55ac..479d8af72bcb 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -32,6 +32,12 @@ static dma_addr_t physaddr(struct drm_gem_object *obj)
 			priv->vram.paddr;
 }
 
+static bool use_pages(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	return !msm_obj->vram_node;
+}
+
 /* allocate pages from VRAM carveout, used when no IOMMU: */
 static struct page **get_pages_vram(struct drm_gem_object *obj,
 		int npages)
@@ -72,7 +78,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 		struct page **p;
 		int npages = obj->size >> PAGE_SHIFT;
 
-		if (iommu_present(&platform_bus_type))
+		if (use_pages(obj))
 			p = drm_gem_get_pages(obj);
 		else
 			p = get_pages_vram(obj, npages);
@@ -116,7 +122,7 @@ static void put_pages(struct drm_gem_object *obj)
 		sg_free_table(msm_obj->sgt);
 		kfree(msm_obj->sgt);
 
-		if (iommu_present(&platform_bus_type))
+		if (use_pages(obj))
 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
 		else {
 			drm_mm_remove_node(msm_obj->vram_node);
@@ -580,6 +586,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj;
 	unsigned sz;
+	bool use_vram = false;
 
 	switch (flags & MSM_BO_CACHE_MASK) {
 	case MSM_BO_UNCACHED:
@@ -592,15 +599,23 @@ static int msm_gem_new_impl(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	sz = sizeof(*msm_obj);
 	if (!iommu_present(&platform_bus_type))
+		use_vram = true;
+	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
+		use_vram = true;
+
+	if (WARN_ON(use_vram && !priv->vram.size))
+		return -EINVAL;
+
+	sz = sizeof(*msm_obj);
+	if (use_vram)
 		sz += sizeof(struct drm_mm_node);
 
 	msm_obj = kzalloc(sz, GFP_KERNEL);
 	if (!msm_obj)
 		return -ENOMEM;
 
-	if (!iommu_present(&platform_bus_type))
+	if (use_vram)
 		msm_obj->vram_node = (void *)&msm_obj[1];
 
 	msm_obj->flags = flags;
@@ -630,7 +645,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 	if (ret)
 		goto fail;
 
-	if (iommu_present(&platform_bus_type)) {
+	if (use_pages(obj)) {
 		ret = drm_gem_object_init(dev, obj, size);
 		if (ret)
 			goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 8fbbd0594c46..85d481e29276 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -21,6 +21,9 @@
 #include <linux/reservation.h>
 #include "msm_drv.h"
 
+/* Additional internal-use only BO flags: */
+#define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
+
 struct msm_gem_object {
 	struct drm_gem_object base;
 
@@ -59,7 +62,7 @@ struct msm_gem_object {
 	struct reservation_object _resv;
 
 	/* For physically contiguous buffers.  Used when we don't have
-	 * an IOMMU.
+	 * an IOMMU.  Also used for stolen/splashscreen buffer.
 	 */
 	struct drm_mm_node *vram_node;
 };
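
For reference, the 'memory-region' phandle that msm_init_vram() now parses
would typically point at a reserved-memory carveout described in the board's
device tree.  A minimal sketch of such a fragment follows; the node names,
unit address, size, and the mdp node used as the consumer are illustrative
assumptions only, not taken from this patch or any real board file:

	reserved-memory {
		#address-cells = <1>;
		#size-cells = <1>;
		ranges;

		/* hypothetical carveout holding the bootloader's splash framebuffer */
		splash_mem: splash_region@8e000000 {
			reg = <0x8e000000 0x00800000>;	/* assumed base and 8 MiB size */
			no-map;
		};
	};

	mdp@fd900000 {
		/* ... other display-controller properties ... */
		memory-region = <&splash_mem>;
	};

On systems without an IOMMU (the non-OF fallback above), the carveout size is
instead taken from the driver's existing 'vram' module parameter via memparse().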