From: Chris Wilson
Date: Fri, 19 Aug 2016 15:54:27 +0000 (+0100)
Subject: drm/i915: Embed the io-mapping struct inside drm_i915_private
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=f7bbe7883c3f119714fd09a8ceaac8075ba04dfe;p=linux-beck.git

drm/i915: Embed the io-mapping struct inside drm_i915_private

As io_mapping.h now always allocates the struct, we can avoid that
allocation and extra pointer dance by embedding the struct inside
drm_i915_private.

Signed-off-by: Chris Wilson
Reviewed-by: Joonas Lahtinen
Link: http://patchwork.freedesktop.org/patch/msgid/20160819155428.1670-5-chris@chris-wilson.co.uk
---

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0e1f5dde2e87..5398af7f7580 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -891,7 +891,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
	 * and write to user memory which may result into page
	 * faults, and so we cannot perform this under struct_mutex.
	 */
-	if (slow_user_access(ggtt->mappable, page_base,
+	if (slow_user_access(&ggtt->mappable, page_base,
			     page_offset, user_data,
			     page_length, false)) {
		ret = -EFAULT;
@@ -1187,11 +1187,11 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
-		if (fast_user_write(ggtt->mappable, page_base,
+		if (fast_user_write(&ggtt->mappable, page_base,
				    page_offset, user_data, page_length)) {
			hit_slow_path = true;
			mutex_unlock(&dev->struct_mutex);
-			if (slow_user_access(ggtt->mappable,
+			if (slow_user_access(&ggtt->mappable,
					     page_base,
					     page_offset, user_data,
					     page_length, true)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 4192066ff60e..601156c353cc 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -474,7 +474,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
		offset += page << PAGE_SHIFT;
	}

-	vaddr = io_mapping_map_atomic_wc(cache->i915->ggtt.mappable, offset);
+	vaddr = io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a18363a0d8c5..b90fdcee992a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2794,7 +2794,6 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)

	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
		ppgtt->base.cleanup(&ppgtt->base);
		kfree(ppgtt);
	}
@@ -2811,7 +2810,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
	ggtt->base.cleanup(&ggtt->base);

	arch_phys_wc_del(ggtt->mtrr);
-	io_mapping_free(ggtt->mappable);
+	io_mapping_fini(&ggtt->mappable);
 }

 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3209,9 +3208,9 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
	if (!HAS_LLC(dev_priv))
		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;

-	ggtt->mappable =
-		io_mapping_create_wc(ggtt->mappable_base, ggtt->mappable_end);
-	if (!ggtt->mappable) {
+	if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
+				dev_priv->ggtt.mappable_base,
+				dev_priv->ggtt.mappable_end)) {
		ret = -EIO;
		goto out_gtt_cleanup;
	}
@@ -3681,7 +3680,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)

	ptr = vma->iomap;
	if (ptr == NULL) {
-		ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
+		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index a15cea73f729..a9aec25535ac 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -439,13 +439,13 @@ struct i915_address_space {
  */
 struct i915_ggtt {
	struct i915_address_space base;
+	struct io_mapping mappable;	/* Mapping to our CPU mappable region */

	size_t stolen_size;		/* Total size of stolen memory */
	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
	size_t stolen_reserved_base;
	size_t stolen_reserved_size;
	u64 mappable_end;		/* End offset that we can CPU map */
-	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 84dd5bc06db3..41ec7a183c73 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -729,7 +729,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
		 * captures what the GPU read.
		 */

-		s = io_mapping_map_atomic_wc(ggtt->mappable,
+		s = io_mapping_map_atomic_wc(&ggtt->mappable,
					     reloc_offset);
		memcpy_fromio(d, s, PAGE_SIZE);
		io_mapping_unmap_atomic(s);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 3cf8d02064a8..a24bc8c7889f 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
	if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
	else
-		regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
+		regs = io_mapping_map_wc(&dev_priv->ggtt.mappable,
					 overlay->flip_addr,
					 PAGE_SIZE);

@@ -1489,7 +1489,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
		regs = (struct overlay_registers __iomem *)
			overlay->reg_bo->phys_handle->vaddr;
	else
-		regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+		regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.mappable,
						overlay->flip_addr);

	return regs;
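
For reference, the io-mapping usage pattern the patch moves to looks roughly like
the sketch below. The struct foo_private container and the foo_* helper names are
illustrative placeholders, not i915 code; only the io_mapping_init_wc(),
io_mapping_fini() and io_mapping_map_wc()/io_mapping_unmap() calls mirror the API
used in the diff above.

#include <linux/io-mapping.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/types.h>

struct foo_private {
	struct io_mapping mappable;	/* embedded, no separate allocation */
	resource_size_t mappable_base;	/* PA of the CPU-visible aperture */
	unsigned long mappable_end;	/* size of the CPU-visible aperture */
};

static int foo_init_mappable(struct foo_private *priv)
{
	/* Initialise the embedded struct in place (was io_mapping_create_wc()). */
	if (!io_mapping_init_wc(&priv->mappable,
				priv->mappable_base,
				priv->mappable_end))
		return -EIO;

	return 0;
}

static void foo_fini_mappable(struct foo_private *priv)
{
	/* Tear down in place (was io_mapping_free() on the allocated copy). */
	io_mapping_fini(&priv->mappable);
}

static void foo_poke(struct foo_private *priv, unsigned long offset, u32 val)
{
	/* Callers now pass &priv->mappable instead of a pointer member. */
	void __iomem *vaddr;

	vaddr = io_mapping_map_wc(&priv->mappable, offset, PAGE_SIZE);
	if (vaddr) {
		writel(val, vaddr);
		io_mapping_unmap(vaddr);
	}
}

Embedding the struct trades one pointer-sized field for the full struct io_mapping,
but drops the kmalloc/kfree pair and the extra dereference on every map call, which
is the "pointer dance" the commit message refers to.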