git.karo-electronics.de Git - linux-beck.git/commitdiff
drm/i915/cmdparser: Only cache the dst vmap
authorChris Wilson <chris@chris-wilson.co.uk>
Thu, 18 Aug 2016 16:17:13 +0000 (17:17 +0100)
committerChris Wilson <chris@chris-wilson.co.uk>
Thu, 18 Aug 2016 21:36:59 +0000 (22:36 +0100)
For simplicity, we want to continue using a contiguous mapping of the
command buffer, but we can reduce the number of vmappings we hold by
switching over to a page-by-page copy from the user batch buffer to the
shadow. The cost for saving one linear mapping is about 5% in trivial
workloads - which is more or less the overhead in calling kmap_atomic().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160818161718.27187-34-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_cmd_parser.c

index 5d9ea163d1c812c99271f0bce98d34119e587f95..d1858f80d64c6f6d854f54b646659a42bae2169d 100644 (file)
@@ -946,7 +946,8 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 {
        unsigned int src_needs_clflush;
        unsigned int dst_needs_clflush;
-       void *src, *dst;
+       void *dst, *ptr;
+       int offset, n;
        int ret;
 
        ret = i915_gem_obj_prepare_shmem_read(src_obj, &src_needs_clflush);
@@ -959,19 +960,12 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                goto unpin_src;
        }
 
-       src = i915_gem_object_pin_map(src_obj, I915_MAP_WB);
-       if (IS_ERR(src)) {
-               dst = src;
-               goto unpin_dst;
-       }
-
        dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
        if (IS_ERR(dst))
-               goto unmap_src;
+               goto unpin_dst;
 
-       src += batch_start_offset;
-       if (src_needs_clflush)
-               drm_clflush_virt_range(src, batch_len);
+       ptr = dst;
+       offset = offset_in_page(batch_start_offset);
 
        /* We can avoid clflushing partial cachelines before the write if we
         * only ever write full cache-lines. Since we know that both the
@@ -982,13 +976,24 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
        if (dst_needs_clflush & CLFLUSH_BEFORE)
                batch_len = roundup(batch_len, boot_cpu_data.x86_clflush_size);
 
-       memcpy(dst, src, batch_len);
+       for (n = batch_start_offset >> PAGE_SHIFT; batch_len; n++) {
+               int len = min_t(int, batch_len, PAGE_SIZE - offset);
+               void *vaddr;
+
+               vaddr = kmap_atomic(i915_gem_object_get_page(src_obj, n));
+               if (src_needs_clflush)
+                       drm_clflush_virt_range(vaddr + offset, len);
+               memcpy(ptr, vaddr + offset, len);
+               kunmap_atomic(vaddr);
+
+               ptr += len;
+               batch_len -= len;
+               offset = 0;
+       }
 
        /* dst_obj is returned with vmap pinned */
        *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
 
-unmap_src:
-       i915_gem_object_unpin_map(src_obj);
 unpin_dst:
        i915_gem_obj_finish_shmem_access(dst_obj);
 unpin_src: