drm/i915: Convert vm->dev backpointer to vm->i915
author     Chris Wilson <chris@chris-wilson.co.uk>
           Tue, 29 Nov 2016 09:50:08 +0000 (09:50 +0000)
committer  Chris Wilson <chris@chris-wilson.co.uk>
           Tue, 29 Nov 2016 11:38:00 +0000 (11:38 +0000)
99% of the time we access i915_address_space->dev we want the i915
device and not the drm device, so let's store the drm_i915_private
backpointer instead. The only real complication is the inlines in
i915_vma.h, where drm_i915_private is not yet defined, so we have to
choose an alternate path for our asserts.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161129095008.32622-1-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
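
For readers unfamiliar with the pattern, below is a minimal, self-contained C
sketch (hypothetical stand-in types, not the kernel headers) of the conversion
described in the message: the address space stores the drm_i915_private
container directly, so the common case no longer needs the to_i915() upcast,
while the drm_device stays reachable as i915->drm. The i915_vma.h complication
shows up further down, where the asserts go through vma->obj->base.dev instead.

#include <stddef.h>
#include <stdio.h>

/* Stand-in types; the real drm_device/drm_i915_private live in the kernel. */
struct drm_device { int struct_mutex; };
struct drm_i915_private { struct drm_device drm; int gen; };

/* Upcast from the embedded drm_device to its drm_i915_private container,
 * mirroring the kernel's to_i915()/container_of() idiom. */
#define to_i915(dev) \
	((struct drm_i915_private *)((char *)(dev) - offsetof(struct drm_i915_private, drm)))

struct i915_address_space {
	struct drm_i915_private *i915;	/* was: struct drm_device *dev */
};

int main(void)
{
	struct drm_i915_private dev_priv = { .gen = 9 };
	struct i915_address_space vm = { .i915 = &dev_priv };

	/* Old layout: vm stored &dev_priv.drm, so i915 access needed to_i915(). */
	struct drm_device *dev = &dev_priv.drm;
	printf("gen via to_i915(dev): %d\n", to_i915(dev)->gen);

	/* New layout: vm.i915 is direct; the drm device is one hop away. */
	printf("gen via vm.i915:      %d\n", vm.i915->gen);
	printf("struct_mutex at:      %p\n", (void *)&vm.i915->drm.struct_mutex);
	return 0;
}
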
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_fence_reg.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3d4e07e9734f59e6898faa596c910e6589702d47..8ebefb6f6cf21033986368b05b7ed703fdc98134 100644
@@ -3532,7 +3532,7 @@ err_unpin_display:
 void
 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
 {
-       lockdep_assert_held(&vma->vm->dev->struct_mutex);
+       lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
        if (WARN_ON(vma->obj->pin_display == 0))
                return;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index bd08814b015cb238c639e2becec6917901890d4e..739ede7c89ed4d9681202a8d468f98450888d1b1 100644
@@ -96,7 +96,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
                         u64 start, u64 end,
                         unsigned flags)
 {
-       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct drm_i915_private *dev_priv = vm->i915;
        struct list_head eviction_list;
        struct list_head *phases[] = {
                &vm->inactive_list,
@@ -106,7 +106,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
        struct i915_vma *vma, *next;
        int ret;
 
-       lockdep_assert_held(&vm->dev->struct_mutex);
+       lockdep_assert_held(&vm->i915->drm.struct_mutex);
        trace_i915_gem_evict(vm, min_size, alignment, flags);
 
        /*
@@ -162,7 +162,7 @@ search_again:
                 * back to userspace to give our workqueues time to
                 * acquire our locks and unpin the old scanouts.
                 */
-               return intel_has_pending_fb_unpin(vm->dev) ? -EAGAIN : -ENOSPC;
+               return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
        }
 
        /* Not everything in the GGTT is tracked via vma (otherwise we
@@ -217,7 +217,7 @@ i915_gem_evict_for_vma(struct i915_vma *target)
 {
        struct drm_mm_node *node, *next;
 
-       lockdep_assert_held(&target->vm->dev->struct_mutex);
+       lockdep_assert_held(&target->vm->i915->drm.struct_mutex);
 
        list_for_each_entry_safe(node, next,
                        &target->vm->mm.head_node.node_list,
@@ -272,11 +272,11 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
        struct i915_vma *vma, *next;
        int ret;
 
-       lockdep_assert_held(&vm->dev->struct_mutex);
+       lockdep_assert_held(&vm->i915->drm.struct_mutex);
        trace_i915_gem_evict_vm(vm);
 
        if (do_idle) {
-               struct drm_i915_private *dev_priv = to_i915(vm->dev);
+               struct drm_i915_private *dev_priv = vm->i915;
 
                if (i915_is_ggtt(vm)) {
                        ret = i915_gem_switch_to_kernel_context(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 0efa3571afc3cbdbaeda14f5338884dbe3294305..d006bcb69e91aec03d941d4891684eb4305bdeb7 100644
@@ -290,7 +290,7 @@ i915_vma_put_fence(struct i915_vma *vma)
 {
        struct drm_i915_fence_reg *fence = vma->fence;
 
-       assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+       assert_rpm_wakelock_held(vma->vm->i915);
 
        if (!fence)
                return 0;
@@ -313,7 +313,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
        }
 
        /* Wait for completion of pending flips which consume fences */
-       if (intel_has_pending_fb_unpin(&dev_priv->drm))
+       if (intel_has_pending_fb_unpin(dev_priv))
                return ERR_PTR(-EAGAIN);
 
        return ERR_PTR(-EDEADLK);
@@ -346,7 +346,7 @@ i915_vma_get_fence(struct i915_vma *vma)
        /* Note that we revoke fences on runtime suspend. Therefore the user
         * must keep the device awake whilst using the fence.
         */
-       assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+       assert_rpm_wakelock_held(vma->vm->i915);
 
        /* Just update our place in the LRU if our fence is getting reused. */
        if (vma->fence) {
@@ -357,7 +357,7 @@ i915_vma_get_fence(struct i915_vma *vma)
                        return 0;
                }
        } else if (set) {
-               fence = fence_find(to_i915(vma->vm->dev));
+               fence = fence_find(vma->vm->i915);
                if (IS_ERR(fence))
                        return PTR_ERR(fence);
        } else
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6cee70745fea553e97eb68c1de1e43ca0ba0b3f0..18d0340e2be41c08fd9112967c28c70d69ea8458 100644
@@ -380,7 +380,7 @@ static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
 
 #define kmap_px(px) kmap_page_dma(px_base(px))
 #define kunmap_px(ppgtt, vaddr) \
-               kunmap_page_dma(to_i915((ppgtt)->base.dev), (vaddr))
+               kunmap_page_dma((ppgtt)->base.i915, (vaddr))
 
 #define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px))
 #define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px))
@@ -470,7 +470,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
        scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
                                      I915_CACHE_LLC);
 
-       fill_px(to_i915(vm->dev), pt, scratch_pte);
+       fill_px(vm->i915, pt, scratch_pte);
 }
 
 static void gen6_initialize_pt(struct i915_address_space *vm,
@@ -483,7 +483,7 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
        scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
                                     I915_CACHE_LLC, 0);
 
-       fill32_px(to_i915(vm->dev), pt, scratch_pte);
+       fill32_px(vm->i915, pt, scratch_pte);
 }
 
 static struct i915_page_directory *alloc_pd(struct drm_i915_private *dev_priv)
@@ -531,7 +531,7 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
 
        scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
 
-       fill_px(to_i915(vm->dev), pd, scratch_pde);
+       fill_px(vm->i915, pd, scratch_pde);
 }
 
 static int __pdp_init(struct drm_i915_private *dev_priv,
@@ -612,7 +612,7 @@ static void gen8_initialize_pdp(struct i915_address_space *vm,
 
        scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
 
-       fill_px(to_i915(vm->dev), pdp, scratch_pdpe);
+       fill_px(vm->i915, pdp, scratch_pdpe);
 }
 
 static void gen8_initialize_pml4(struct i915_address_space *vm,
@@ -623,7 +623,7 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
        scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
                                          I915_CACHE_LLC);
 
-       fill_px(to_i915(vm->dev), pml4, scratch_pml4e);
+       fill_px(vm->i915, pml4, scratch_pml4e);
 }
 
 static void
@@ -710,7 +710,7 @@ static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
  */
 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
 {
-       ppgtt->pd_dirty_rings = INTEL_INFO(to_i915(ppgtt->base.dev))->ring_mask;
+       ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
 }
 
 /* Removes entries from a single page table, releasing it if it's empty.
@@ -773,7 +773,7 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
                        pde_vaddr = kmap_px(pd);
                        pde_vaddr[pde] = scratch_pde;
                        kunmap_px(ppgtt, pde_vaddr);
-                       free_pt(to_i915(vm->dev), pt);
+                       free_pt(vm->i915, pt);
                }
        }
 
@@ -809,7 +809,7 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
                                pdpe_vaddr[pdpe] = scratch_pdpe;
                                kunmap_px(ppgtt, pdpe_vaddr);
                        }
-                       free_pd(to_i915(vm->dev), pd);
+                       free_pd(vm->i915, pd);
                }
        }
 
@@ -830,7 +830,6 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
                                  uint64_t start,
                                  uint64_t length)
 {
-       struct drm_i915_private *dev_priv = to_i915(vm->dev);
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct i915_page_directory_pointer *pdp;
        uint64_t pml4e;
@@ -838,7 +837,7 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
        gen8_ppgtt_pml4e_t scratch_pml4e =
                gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC);
 
-       GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(to_i915(vm->dev)));
+       GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->i915));
 
        gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
                if (WARN_ON(!pml4->pdps[pml4e]))
@@ -849,7 +848,7 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
                        pml4e_vaddr = kmap_px(pml4);
                        pml4e_vaddr[pml4e] = scratch_pml4e;
                        kunmap_px(ppgtt, pml4e_vaddr);
-                       free_pdp(dev_priv, pdp);
+                       free_pdp(vm->i915, pdp);
                }
        }
 }
@@ -951,7 +950,7 @@ static void gen8_free_page_tables(struct drm_i915_private *dev_priv,
 
 static int gen8_init_scratch(struct i915_address_space *vm)
 {
-       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct drm_i915_private *dev_priv = vm->i915;
        int ret;
 
        ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
@@ -998,7 +997,7 @@ free_scratch_page:
 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
 {
        enum vgt_g2v_type msg;
-       struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+       struct drm_i915_private *dev_priv = ppgtt->base.i915;
        int i;
 
        if (USES_FULL_48BIT_PPGTT(dev_priv)) {
@@ -1028,7 +1027,7 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
 
 static void gen8_free_scratch(struct i915_address_space *vm)
 {
-       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct drm_i915_private *dev_priv = vm->i915;
 
        if (USES_FULL_48BIT_PPGTT(dev_priv))
                free_pdp(dev_priv, vm->scratch_pdp);
@@ -1055,7 +1054,7 @@ static void gen8_ppgtt_cleanup_3lvl(struct drm_i915_private *dev_priv,
 
 static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
 {
-       struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+       struct drm_i915_private *dev_priv = ppgtt->base.i915;
        int i;
 
        for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
@@ -1070,7 +1069,7 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
 
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 {
-       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct drm_i915_private *dev_priv = vm->i915;
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 
        if (intel_vgpu_active(dev_priv))
@@ -1108,7 +1107,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
                                     uint64_t length,
                                     unsigned long *new_pts)
 {
-       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct drm_i915_private *dev_priv = vm->i915;
        struct i915_page_table *pt;
        uint32_t pde;
 
@@ -1169,7 +1168,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
                                  uint64_t length,
                                  unsigned long *new_pds)
 {
-       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct drm_i915_private *dev_priv = vm->i915;
        struct i915_page_directory *pd;
        uint32_t pdpe;
        uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
@@ -1222,7 +1221,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
                                  uint64_t length,
                                  unsigned long *new_pdps)
 {
-       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct drm_i915_private *dev_priv = vm->i915;
        struct i915_page_directory_pointer *pdp;
        uint32_t pml4e;
 
@@ -1297,7 +1296,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
 {
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        unsigned long *new_page_dirs, *new_page_tables;
-       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct drm_i915_private *dev_priv = vm->i915;
        struct i915_page_directory *pd;
        const uint64_t orig_start = start;
        const uint64_t orig_length = length;
@@ -1446,7 +1445,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
 
 err_out:
        for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
-               gen8_ppgtt_cleanup_3lvl(to_i915(vm->dev), pml4->pdps[pml4e]);
+               gen8_ppgtt_cleanup_3lvl(vm->i915, pml4->pdps[pml4e]);
 
        return ret;
 }
@@ -1580,7 +1579,7 @@ static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
  */
 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
-       struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+       struct drm_i915_private *dev_priv = ppgtt->base.i915;
        int ret;
 
        ret = gen8_init_scratch(&ppgtt->base);
@@ -1923,7 +1922,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
                               uint64_t start_in, uint64_t length_in)
 {
        DECLARE_BITMAP(new_page_tables, I915_PDES);
-       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct drm_i915_private *dev_priv = vm->i915;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct i915_page_table *pt;
@@ -2010,7 +2009,7 @@ unwind_out:
 
 static int gen6_init_scratch(struct i915_address_space *vm)
 {
-       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct drm_i915_private *dev_priv = vm->i915;
        int ret;
 
        ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
@@ -2030,7 +2029,7 @@ static int gen6_init_scratch(struct i915_address_space *vm)
 
 static void gen6_free_scratch(struct i915_address_space *vm)
 {
-       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct drm_i915_private *dev_priv = vm->i915;
 
        free_pt(dev_priv, vm->scratch_pt);
        cleanup_scratch_page(dev_priv, &vm->scratch_page);
@@ -2040,7 +2039,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 {
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct i915_page_directory *pd = &ppgtt->pd;
-       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct drm_i915_private *dev_priv = vm->i915;
        struct i915_page_table *pt;
        uint32_t pde;
 
@@ -2056,7 +2055,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 {
        struct i915_address_space *vm = &ppgtt->base;
-       struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+       struct drm_i915_private *dev_priv = ppgtt->base.i915;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        bool retried = false;
        int ret;
@@ -2121,7 +2120,7 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
 
 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
-       struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+       struct drm_i915_private *dev_priv = ppgtt->base.i915;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        int ret;
 
@@ -2172,7 +2171,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
                           struct drm_i915_private *dev_priv)
 {
-       ppgtt->base.dev = &dev_priv->drm;
+       ppgtt->base.i915 = dev_priv;
 
        if (INTEL_INFO(dev_priv)->gen < 8)
                return gen6_ppgtt_init(ppgtt);
@@ -2394,7 +2393,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
                                  enum i915_cache_level level,
                                  u32 unused)
 {
-       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct drm_i915_private *dev_priv = vm->i915;
        gen8_pte_t __iomem *pte =
                (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
                (offset >> PAGE_SHIFT);
@@ -2410,7 +2409,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
                                     uint64_t start,
                                     enum i915_cache_level level, u32 unused)
 {
-       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct drm_i915_private *dev_priv = vm->i915;
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        struct sgt_iter sgt_iter;
        gen8_pte_t __iomem *gtt_entries;
@@ -2475,7 +2474,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
                                  enum i915_cache_level level,
                                  u32 flags)
 {
-       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct drm_i915_private *dev_priv = vm->i915;
        gen6_pte_t __iomem *pte =
                (gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
                (offset >> PAGE_SHIFT);
@@ -2497,7 +2496,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
                                     uint64_t start,
                                     enum i915_cache_level level, u32 flags)
 {
-       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       struct drm_i915_private *dev_priv = vm->i915;
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        struct sgt_iter sgt_iter;
        gen6_pte_t __iomem *gtt_entries;
@@ -2617,7 +2616,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
                         enum i915_cache_level cache_level,
                         u32 flags)
 {
-       struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+       struct drm_i915_private *i915 = vma->vm->i915;
        struct drm_i915_gem_object *obj = vma->obj;
        u32 pte_flags = 0;
        int ret;
@@ -2649,7 +2648,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
                                 enum i915_cache_level cache_level,
                                 u32 flags)
 {
-       struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+       struct drm_i915_private *i915 = vma->vm->i915;
        u32 pte_flags;
        int ret;
 
@@ -2683,7 +2682,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 
 static void ggtt_unbind_vma(struct i915_vma *vma)
 {
-       struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+       struct drm_i915_private *i915 = vma->vm->i915;
        struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
        const u64 size = min(vma->size, vma->node.size);
 
@@ -2925,8 +2924,8 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
 
 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 {
-       struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
-       struct pci_dev *pdev = ggtt->base.dev->pdev;
+       struct drm_i915_private *dev_priv = ggtt->base.i915;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        phys_addr_t phys_addr;
        int ret;
 
@@ -3038,12 +3037,12 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 
        iounmap(ggtt->gsm);
-       cleanup_scratch_page(to_i915(vm->dev), &vm->scratch_page);
+       cleanup_scratch_page(vm->i915, &vm->scratch_page);
 }
 
 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 {
-       struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
+       struct drm_i915_private *dev_priv = ggtt->base.i915;
        struct pci_dev *pdev = dev_priv->drm.pdev;
        unsigned int size;
        u16 snb_gmch_ctl;
@@ -3092,7 +3091,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 
 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 {
-       struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
+       struct drm_i915_private *dev_priv = ggtt->base.i915;
        struct pci_dev *pdev = dev_priv->drm.pdev;
        unsigned int size;
        u16 snb_gmch_ctl;
@@ -3145,7 +3144,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)
 
 static int i915_gmch_probe(struct i915_ggtt *ggtt)
 {
-       struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
+       struct drm_i915_private *dev_priv = ggtt->base.i915;
        int ret;
 
        ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
@@ -3180,7 +3179,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        int ret;
 
-       ggtt->base.dev = &dev_priv->drm;
+       ggtt->base.i915 = dev_priv;
 
        if (INTEL_GEN(dev_priv) <= 5)
                ret = i915_gmch_probe(ggtt);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 4f35be4c26c773765b384e78aa0350743a973b7e..8965bbb13db76258457e79b9fe3262969ddde4eb 100644
@@ -220,7 +220,7 @@ struct i915_pml4 {
 struct i915_address_space {
        struct drm_mm mm;
        struct i915_gem_timeline timeline;
-       struct drm_device *dev;
+       struct drm_i915_private *i915;
        /* Every address space belongs to a struct file - except for the global
         * GTT that is owned by the driver (and so @file is set to NULL). In
         * principle, no information should leak from one context to another
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index c85e7b06bdba72bfd6dc0d4ce3f1b595e27a9284..62ad375de6cac9eeb098355939405350e406364a 100644
@@ -119,7 +119,7 @@ i915_tiling_ok(struct drm_i915_private *dev_priv,
 
 static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode)
 {
-       struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
+       struct drm_i915_private *dev_priv = vma->vm->i915;
        u32 size;
 
        if (!i915_vma_is_map_and_fenceable(vma))
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index a792dcb902b51d337f46f2c1ad7ad1ea737f4c8a..4c91a68ecb6d57eb20f04c978f1e21d06b93fe8a 100644
@@ -198,9 +198,9 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
        void __iomem *ptr;
 
        /* Access through the GTT requires the device to be awake. */
-       assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+       assert_rpm_wakelock_held(vma->vm->i915);
 
-       lockdep_assert_held(&vma->vm->dev->struct_mutex);
+       lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
        if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
                return IO_ERR_PTR(-ENODEV);
 
@@ -347,7 +347,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 static int
 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
-       struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
+       struct drm_i915_private *dev_priv = vma->vm->i915;
        struct drm_i915_gem_object *obj = vma->obj;
        u64 start, end;
        int ret;
@@ -469,7 +469,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
        unsigned int bound = vma->flags;
        int ret;
 
-       lockdep_assert_held(&vma->vm->dev->struct_mutex);
+       lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
        GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
        GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
 
@@ -567,7 +567,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 
                for_each_active(active, idx) {
                        ret = i915_gem_active_retire(&vma->last_read[idx],
-                                                  &vma->vm->dev->struct_mutex);
+                                                    &vma->vm->i915->drm.struct_mutex);
                        if (ret)
                                break;
                }
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 85446f0b0b3f1d4a757449ff7e935aa8758d6138..21be74c61065af1cbeed209995843291654dfb98 100644
@@ -282,7 +282,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
  */
 static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
 {
-       lockdep_assert_held(&vma->vm->dev->struct_mutex);
+       lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
        GEM_BUG_ON(vma->iomap == NULL);
        i915_vma_unpin(vma);
 }
@@ -311,7 +311,7 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
 static inline bool
 i915_vma_pin_fence(struct i915_vma *vma)
 {
-       lockdep_assert_held(&vma->vm->dev->struct_mutex);
+       lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
        if (vma->fence) {
                vma->fence->pin_count++;
                return true;
@@ -330,7 +330,7 @@ i915_vma_pin_fence(struct i915_vma *vma)
 static inline void
 i915_vma_unpin_fence(struct i915_vma *vma)
 {
-       lockdep_assert_held(&vma->vm->dev->struct_mutex);
+       lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
        if (vma->fence) {
                GEM_BUG_ON(vma->fence->pin_count <= 0);
                vma->fence->pin_count--;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 04636552e98acb9f82a8d593199efba81560d3f7..3b9e60aeca51a865f6f1b0cfcfa5fb83acc05d60 100644
@@ -4228,9 +4228,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
        udelay(100);
 }
 
-bool intel_has_pending_fb_unpin(struct drm_device *dev)
+bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc;
 
        /* Note that we don't need to be called with mode_config.lock here
@@ -4240,7 +4239,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
         * cannot claim and pin a new fb without at least acquring the
         * struct_mutex and so serialising with us.
         */
-       for_each_intel_crtc(dev, crtc) {
+       for_each_intel_crtc(&dev_priv->drm, crtc) {
                if (atomic_read(&crtc->unpin_work_count) == 0)
                        continue;
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 2b71567b52925f6762b7e013efa0b12073d825a0..7f43c2f7d9d6a2bcc2913946d515947d25ba9818 100644
@@ -1219,7 +1219,7 @@ unsigned int intel_fb_xy_to_linear(int x, int y,
 void intel_add_fb_offsets(int *x, int *y,
                          const struct intel_plane_state *state, int plane);
 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
-bool intel_has_pending_fb_unpin(struct drm_device *dev);
+bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
 void intel_mark_busy(struct drm_i915_private *dev_priv);
 void intel_mark_idle(struct drm_i915_private *dev_priv);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);