From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu, 2 Feb 2017 13:27:21 +0000 (+0000)
Subject: drm/i915: Recreate internal objects with single page segments if dmar fails
X-Git-Tag: v4.12-rc1~116^2~34^2~363
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=bb96dcf5830e5d81a1da2e2a14e6c0f7dfc64348;p=karo-tx-linux.git

drm/i915: Recreate internal objects with single page segments if dmar fails

If we fail to dma-map the object, the most common cause is lack of space
inside the SW-IOTLB due to fragmentation. If we recreate the sg_table
using segments of PAGE_SIZE (and single page allocations), we may
succeed in remapping the scatterlist.

This first became a significant problem for the mock selftests after
commit 5584f1b1d73e ("drm/i915: fix i915 running as dom0 under Xen")
increased the max_order.

Fixes: 920cf4194954 ("drm/i915: Introduce an internal allocator for disposable private objects")
Fixes: 5584f1b1d73e ("drm/i915: fix i915 running as dom0 under Xen")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170202132721.12711-1-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: <stable@vger.kernel.org> # v4.10-rc1+
---

diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index 2b9d5e94a8ae..fc950abbe400 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -48,24 +48,12 @@ static struct sg_table *
 i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	unsigned int npages = obj->base.size / PAGE_SIZE;
 	struct sg_table *st;
 	struct scatterlist *sg;
+	unsigned int npages;
 	int max_order;
 	gfp_t gfp;
 
-	st = kmalloc(sizeof(*st), GFP_KERNEL);
-	if (!st)
-		return ERR_PTR(-ENOMEM);
-
-	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
-		kfree(st);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	sg = st->sgl;
-	st->nents = 0;
-
 	max_order = MAX_ORDER;
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
@@ -87,6 +75,20 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 		gfp |= __GFP_DMA32;
 	}
 
+create_st:
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (!st)
+		return ERR_PTR(-ENOMEM);
+
+	npages = obj->base.size / PAGE_SIZE;
+	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
+		kfree(st);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	sg = st->sgl;
+	st->nents = 0;
+
 	do {
 		int order = min(fls(npages) - 1, max_order);
 		struct page *page;
@@ -114,8 +116,15 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 		sg = __sg_next(sg);
 	} while (1);
 
-	if (i915_gem_gtt_prepare_pages(obj, st))
+	if (i915_gem_gtt_prepare_pages(obj, st)) {
+		/* Failed to dma-map; try again with single page sg segments */
+		if (get_order(st->sgl->length)) {
+			internal_free_pages(st);
+			max_order = 0;
+			goto create_st;
+		}
 		goto err;
+	}
 
 	/* Mark the pages as dontneed whilst they are still pinned. As soon
 	 * as they are unpinned they are allowed to be reaped by the shrinker,
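
The control flow introduced above reduces to "build the table, try to
dma-map it, rebuild with smaller segments on failure". A minimal sketch
of that pattern in isolation, where build_table() and free_table() are
hypothetical stand-ins for the allocation loop and internal_free_pages()
in this file (an illustration of the idea, not the driver's actual code):

	/* hypothetical helpers, standing in for code elsewhere in this file */
	static struct sg_table *build_table(struct drm_i915_gem_object *obj,
					    int max_order);
	static void free_table(struct sg_table *st);

	static struct sg_table *
	get_mapped_pages(struct drm_i915_gem_object *obj)
	{
		int max_order = MAX_ORDER; /* start with the largest segments */
		struct sg_table *st;

		for (;;) {
			st = build_table(obj, max_order); /* hypothetical */
			if (IS_ERR(st))
				return st;

			/* returns 0 on success: the scatterlist is dma-mapped */
			if (!i915_gem_gtt_prepare_pages(obj, st))
				return st;

			free_table(st); /* hypothetical */
			if (max_order == 0) /* already single pages; give up */
				return ERR_PTR(-ENOMEM);

			/*
			 * Order-0 segments can always be bounced through a
			 * fragmented SW-IOTLB, so retry once with PAGE_SIZE
			 * segments before failing for good.
			 */
			max_order = 0;
		}
	}

The patch implements the same loop with a goto back to create_st, and
keys the retry off get_order(st->sgl->length) rather than a local flag,
so it only retries when the first attempt actually used multi-page
segments.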