git.karo-electronics.de Git - linux-beck.git/commitdiff
Merge tag 'drm-intel-fixes-2014-06-06' of git://anongit.freedesktop.org/drm-intel...
author Dave Airlie <airlied@redhat.com>
Fri, 6 Jun 2014 09:07:09 +0000 (19:07 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 6 Jun 2014 09:07:09 +0000 (19:07 +1000)
> Bunch of stuff for 3.16 still:
> - MIPI DSI panel support for BYT. Finally! From Shobhit & others. I've
>   squeezed this in since it's a regression compared to vbios and we've
>   been ridiculed about it a bit too often ...
> - connection_mutex deadlock fix in get_connector (only affects i915).
> - Core patches for Matt Roper's primary plane work; I've pushed the
>   i915 stuff to 3.17.
> - vlv power well sequencing fixes from Jesse.
> - Fix for cursor size changes from Chris.
> - agpbusy fixes from Ville.
> - A few smaller things.
>

* tag 'drm-intel-fixes-2014-06-06' of git://anongit.freedesktop.org/drm-intel: (32 commits)
  drm/i915: BDW: Adding missing cursor offsets.
  drm: Fix getconnector connection_mutex locking
  drm/i915/bdw: Only use 2g GGTT for 32b platforms
  drm/i915: Nuke pipe A quirk on i830M
  drm/i915: fix display power sw state reporting
  drm/i915: Always apply cursor width changes
  drm/i915: tell the user if both KMS and UMS are disabled
  drm/plane-helper: Add drm_plane_helper_check_update() (v3)
  drm: Check CRTC compatibility in setplane
  drm/i915: use VBT to determine whether to enumerate the VGA port
  drm/i915: Don't WARN about ring idle bit on gen2
  drm/i915: Silence the WARN if the user tries to GTT mmap an incoherent object
  drm/i915: Move the C3 LP write bit setup to gen3_init_clock_gating() for KMS
  drm/i915: Enable interrupt-based AGPBUSY# enable on 85x
  drm/i915: Flip the sense of AGPBUSY_DIS bit
  drm/i915: Set AGPBUSY# bit in init_clock_gating
  drm/i915/vlv: add pll assertion when disabling DPIO common well
  drm/i915/vlv: move DPIO common reset de-assert into __vlv_set_power_well
  drm/i915/vlv: re-order power wells so DPIO common comes after TX
  drm/i915/vlv: move CRI refclk enable into __vlv_set_power_well
  ...

drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_uncore.c

diff --cc drivers/gpu/drm/i915/i915_drv.h
index 8e78703e45cf5148be6cfeeb9839adc450a1699c,4063ee1dec17629d3477e2eb91bf323287a6d621..49414d30e8d42d3068cbd21990f1403e3e8b1f5a
@@@ -251,6 -251,18 +251,6 @@@ struct intel_ddi_plls 
  #define WATCH_LISTS   0
  #define WATCH_GTT     0
  
 -#define I915_GEM_PHYS_CURSOR_0 1
 -#define I915_GEM_PHYS_CURSOR_1 2
 -#define I915_GEM_PHYS_OVERLAY_REGS 3
 -#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
 -
 -struct drm_i915_gem_phys_object {
 -      int id;
 -      struct page **page_list;
 -      drm_dma_handle_t *handle;
 -      struct drm_i915_gem_object *cur_obj;
 -};
 -
  struct opregion_header;
  struct opregion_acpi;
  struct opregion_swsci;
@@@ -1094,6 -1106,9 +1094,6 @@@ struct i915_gem_mm 
        /** Bit 6 swizzling required for Y tiling */
        uint32_t bit_6_swizzle_y;
  
 -      /* storage for physical objects */
 -      struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
 -
        /* accounting, useful for userland debugging */
        spinlock_t object_stat_lock;
        size_t object_memory;
@@@ -1207,6 -1222,7 +1207,7 @@@ struct intel_vbt_data 
        unsigned int lvds_use_ssc:1;
        unsigned int display_clock_mode:1;
        unsigned int fdi_rx_polarity_inverted:1;
+       unsigned int has_mipi:1;
        int lvds_ssc_freq;
        unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
  
  
        /* MIPI DSI */
        struct {
+               u16 port;
                u16 panel_id;
                struct mipi_config *config;
                struct mipi_pps_data *pps;
@@@ -1697,7 -1714,7 +1699,7 @@@ struct drm_i915_gem_object 
        struct drm_file *pin_filp;
  
        /** for phy allocated objects */
 -      struct drm_i915_gem_phys_object *phys_obj;
 +      drm_dma_handle_t *phys_handle;
  
        union {
                struct i915_gem_userptr {
@@@ -1901,9 -1918,6 +1903,9 @@@ struct drm_i915_cmd_table 
  #define IS_ULT(dev)           (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
  #define IS_HSW_GT3(dev)               (IS_HASWELL(dev) && \
                                 ((dev)->pdev->device & 0x00F0) == 0x0020)
 +/* ULX machines are also considered ULT. */
 +#define IS_HSW_ULX(dev)               ((dev)->pdev->device == 0x0A0E || \
 +                               (dev)->pdev->device == 0x0A1E)
  #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
  
  /*
@@@ -2160,12 -2174,10 +2162,12 @@@ void i915_gem_vma_destroy(struct i915_v
  #define PIN_MAPPABLE 0x1
  #define PIN_NONBLOCK 0x2
  #define PIN_GLOBAL 0x4
 +#define PIN_OFFSET_BIAS 0x8
 +#define PIN_OFFSET_MASK (~4095)
  int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
                                     struct i915_address_space *vm,
                                     uint32_t alignment,
 -                                   unsigned flags);
 +                                   uint64_t flags);
  int __must_check i915_vma_unbind(struct i915_vma *vma);
  int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
  void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
@@@ -2287,8 -2299,13 +2289,8 @@@ i915_gem_object_pin_to_display_plane(st
                                     u32 alignment,
                                     struct intel_engine_cs *pipelined);
  void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
 -int i915_gem_attach_phys_object(struct drm_device *dev,
 -                              struct drm_i915_gem_object *obj,
 -                              int id,
 +int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                                int align);
 -void i915_gem_detach_phys_object(struct drm_device *dev,
 -                               struct drm_i915_gem_object *obj);
 -void i915_gem_free_all_phys_object(struct drm_device *dev);
  int i915_gem_open(struct drm_device *dev, struct drm_file *file);
  void i915_gem_release(struct drm_device *dev, struct drm_file *file);
  
@@@ -2415,8 -2432,6 +2417,8 @@@ int __must_check i915_gem_evict_somethi
                                          int min_size,
                                          unsigned alignment,
                                          unsigned cache_level,
 +                                        unsigned long start,
 +                                        unsigned long end,
                                          unsigned flags);
  int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
  int i915_gem_evict_everything(struct drm_device *dev);
diff --cc drivers/gpu/drm/i915/i915_gem.c
index bbcd35abf2471cbd14736b654802c4b44c0bc0f1,ea09d1a0fbd9de8b309f9d5688bf4abbb7e36321..f36126383d260166a950ad20c72b69637246c550
@@@ -47,6 -47,11 +47,6 @@@ i915_gem_object_wait_rendering(struct d
  static void
  i915_gem_object_retire(struct drm_i915_gem_object *obj);
  
 -static int i915_gem_phys_pwrite(struct drm_device *dev,
 -                              struct drm_i915_gem_object *obj,
 -                              struct drm_i915_gem_pwrite *args,
 -                              struct drm_file *file);
 -
  static void i915_gem_write_fence(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj);
  static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
@@@ -209,128 -214,6 +209,128 @@@ i915_gem_get_aperture_ioctl(struct drm_
        return 0;
  }
  
 +static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
 +{
 +      drm_dma_handle_t *phys = obj->phys_handle;
 +
 +      if (!phys)
 +              return;
 +
 +      if (obj->madv == I915_MADV_WILLNEED) {
 +              struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
 +              char *vaddr = phys->vaddr;
 +              int i;
 +
 +              for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
 +                      struct page *page = shmem_read_mapping_page(mapping, i);
 +                      if (!IS_ERR(page)) {
 +                              char *dst = kmap_atomic(page);
 +                              memcpy(dst, vaddr, PAGE_SIZE);
 +                              drm_clflush_virt_range(dst, PAGE_SIZE);
 +                              kunmap_atomic(dst);
 +
 +                              set_page_dirty(page);
 +                              mark_page_accessed(page);
 +                              page_cache_release(page);
 +                      }
 +                      vaddr += PAGE_SIZE;
 +              }
 +              i915_gem_chipset_flush(obj->base.dev);
 +      }
 +
 +#ifdef CONFIG_X86
 +      set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
 +#endif
 +      drm_pci_free(obj->base.dev, phys);
 +      obj->phys_handle = NULL;
 +}
 +
 +int
 +i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 +                          int align)
 +{
 +      drm_dma_handle_t *phys;
 +      struct address_space *mapping;
 +      char *vaddr;
 +      int i;
 +
 +      if (obj->phys_handle) {
 +              if ((unsigned long)obj->phys_handle->vaddr & (align -1))
 +                      return -EBUSY;
 +
 +              return 0;
 +      }
 +
 +      if (obj->madv != I915_MADV_WILLNEED)
 +              return -EFAULT;
 +
 +      if (obj->base.filp == NULL)
 +              return -EINVAL;
 +
 +      /* create a new object */
 +      phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
 +      if (!phys)
 +              return -ENOMEM;
 +
 +      vaddr = phys->vaddr;
 +#ifdef CONFIG_X86
 +      set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
 +#endif
 +      mapping = file_inode(obj->base.filp)->i_mapping;
 +      for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
 +              struct page *page;
 +              char *src;
 +
 +              page = shmem_read_mapping_page(mapping, i);
 +              if (IS_ERR(page)) {
 +#ifdef CONFIG_X86
 +                      set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
 +#endif
 +                      drm_pci_free(obj->base.dev, phys);
 +                      return PTR_ERR(page);
 +              }
 +
 +              src = kmap_atomic(page);
 +              memcpy(vaddr, src, PAGE_SIZE);
 +              kunmap_atomic(src);
 +
 +              mark_page_accessed(page);
 +              page_cache_release(page);
 +
 +              vaddr += PAGE_SIZE;
 +      }
 +
 +      obj->phys_handle = phys;
 +      return 0;
 +}
 +
 +static int
 +i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 +                   struct drm_i915_gem_pwrite *args,
 +                   struct drm_file *file_priv)
 +{
 +      struct drm_device *dev = obj->base.dev;
 +      void *vaddr = obj->phys_handle->vaddr + args->offset;
 +      char __user *user_data = to_user_ptr(args->data_ptr);
 +
 +      if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
 +              unsigned long unwritten;
 +
 +              /* The physical object once assigned is fixed for the lifetime
 +               * of the obj, so we can safely drop the lock and continue
 +               * to access vaddr.
 +               */
 +              mutex_unlock(&dev->struct_mutex);
 +              unwritten = copy_from_user(vaddr, user_data, args->size);
 +              mutex_lock(&dev->struct_mutex);
 +              if (unwritten)
 +                      return -EFAULT;
 +      }
 +
 +      i915_gem_chipset_flush(dev);
 +      return 0;
 +}
 +
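The hunks above replace the old global phys-object table with a per-object drm_dma_handle_t. A minimal caller sketch against the new interface (the helper name is hypothetical and not part of this series); it mirrors what the cursor path in intel_display.c does further down:

/* Hypothetical caller: pin a cursor BO into contiguous memory and fetch
 * the bus address the hardware will scan out from. Sketch only. */
static int example_pin_phys_cursor(struct drm_device *dev,
                                   struct drm_i915_gem_object *obj,
                                   u32 *addr)
{
        /* i830 needs 16 KiB alignment for physical cursors, 256 bytes otherwise. */
        int align = IS_I830(dev) ? 16 * 1024 : 256;
        int ret;

        ret = i915_gem_object_attach_phys(obj, align);
        if (ret)
                return ret;

        *addr = obj->phys_handle->busaddr;
        return 0;
}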
  void *i915_gem_object_alloc(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@@ -1047,8 -930,8 +1047,8 @@@ i915_gem_pwrite_ioctl(struct drm_devic
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
 -      if (obj->phys_obj) {
 -              ret = i915_gem_phys_pwrite(dev, obj, args, file);
 +      if (obj->phys_handle) {
 +              ret = i915_gem_phys_pwrite(obj, args, file);
                goto out;
        }
  
@@@ -1544,7 -1427,7 +1544,7 @@@ int i915_gem_fault(struct vm_area_struc
  
        /* Access to snoopable pages through the GTT is incoherent. */
        if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
-               ret = -EINVAL;
+               ret = -EFAULT;
                goto unlock;
        }
  
@@@ -3374,14 -3257,12 +3374,14 @@@ static struct i915_vma 
  i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
                           struct i915_address_space *vm,
                           unsigned alignment,
 -                         unsigned flags)
 +                         uint64_t flags)
  {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
 -      size_t gtt_max =
 +      unsigned long start =
 +              flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
 +      unsigned long end =
                flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
        struct i915_vma *vma;
        int ret;
        /* If the object is bigger than the entire aperture, reject it early
         * before evicting everything in a vain attempt to find space.
         */
 -      if (obj->base.size > gtt_max) {
 -              DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
 +      if (obj->base.size > end) {
 +              DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
                          obj->base.size,
                          flags & PIN_MAPPABLE ? "mappable" : "total",
 -                        gtt_max);
 +                        end);
                return ERR_PTR(-E2BIG);
        }
  
  search_free:
        ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
                                                  size, alignment,
 -                                                obj->cache_level, 0, gtt_max,
 +                                                obj->cache_level,
 +                                                start, end,
                                                  DRM_MM_SEARCH_DEFAULT,
                                                  DRM_MM_CREATE_DEFAULT);
        if (ret) {
                ret = i915_gem_evict_something(dev, vm, size, alignment,
 -                                             obj->cache_level, flags);
 +                                             obj->cache_level,
 +                                             start, end,
 +                                             flags);
                if (ret == 0)
                        goto search_free;
  
@@@ -4014,30 -3892,11 +4014,30 @@@ i915_gem_ring_throttle(struct drm_devic
        return ret;
  }
  
 +static bool
 +i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
 +{
 +      struct drm_i915_gem_object *obj = vma->obj;
 +
 +      if (alignment &&
 +          vma->node.start & (alignment - 1))
 +              return true;
 +
 +      if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
 +              return true;
 +
 +      if (flags & PIN_OFFSET_BIAS &&
 +          vma->node.start < (flags & PIN_OFFSET_MASK))
 +              return true;
 +
 +      return false;
 +}
 +
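The new PIN_OFFSET_BIAS flag (added in the i915_drv.h hunk above) lets a caller demand a minimum GGTT offset: the page-aligned bias rides in the upper bits of the now 64-bit flags word, is recovered with PIN_OFFSET_MASK, and an existing binding below the bias is reported as misplaced here. A hedged caller sketch (the constant and wrapper are illustrative, not from this series):

#define EXAMPLE_OFFSET_BIAS (256 * 1024)        /* illustrative value */

static int example_pin_above_bias(struct drm_i915_gem_object *obj,
                                  struct i915_address_space *vm)
{
        /* Ask for a GGTT placement at or above 256 KiB. */
        return i915_gem_object_pin(obj, vm, 4096,
                                   PIN_GLOBAL | PIN_OFFSET_BIAS |
                                   EXAMPLE_OFFSET_BIAS);
}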
  int
  i915_gem_object_pin(struct drm_i915_gem_object *obj,
                    struct i915_address_space *vm,
                    uint32_t alignment,
 -                  unsigned flags)
 +                  uint64_t flags)
  {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        struct i915_vma *vma;
                if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
                        return -EBUSY;
  
 -              if ((alignment &&
 -                   vma->node.start & (alignment - 1)) ||
 -                  (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
 +              if (i915_vma_misplaced(vma, alignment, flags)) {
                        WARN(vma->pin_count,
                             "bo is already pinned with incorrect alignment:"
                             " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
                             " obj->map_and_fenceable=%d\n",
                             i915_gem_obj_offset(obj, vm), alignment,
 -                           flags & PIN_MAPPABLE,
 +                           !!(flags & PIN_MAPPABLE),
                             obj->map_and_fenceable);
                        ret = i915_vma_unbind(vma);
                        if (ret)
@@@ -4420,6 -4281,9 +4420,6 @@@ void i915_gem_free_object(struct drm_ge
  
        trace_i915_gem_object_destroy(obj);
  
 -      if (obj->phys_obj)
 -              i915_gem_detach_phys_object(dev, obj);
 -
        list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
                int ret;
  
                }
        }
  
 +      i915_gem_object_detach_phys(obj);
 +
        /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
         * before progressing. */
        if (obj->stolen)
@@@ -4894,7 -4756,7 +4894,7 @@@ i915_gem_load(struct drm_device *dev
        init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
  
        /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
-       if (IS_GEN3(dev)) {
+       if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
                I915_WRITE(MI_ARB_STATE,
                           _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
        }
        register_oom_notifier(&dev_priv->mm.oom_notifier);
  }
  
 -/*
 - * Create a physically contiguous memory object for this object
 - * e.g. for cursor + overlay regs
 - */
 -static int i915_gem_init_phys_object(struct drm_device *dev,
 -                                   int id, int size, int align)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_i915_gem_phys_object *phys_obj;
 -      int ret;
 -
 -      if (dev_priv->mm.phys_objs[id - 1] || !size)
 -              return 0;
 -
 -      phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
 -      if (!phys_obj)
 -              return -ENOMEM;
 -
 -      phys_obj->id = id;
 -
 -      phys_obj->handle = drm_pci_alloc(dev, size, align);
 -      if (!phys_obj->handle) {
 -              ret = -ENOMEM;
 -              goto kfree_obj;
 -      }
 -#ifdef CONFIG_X86
 -      set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
 -#endif
 -
 -      dev_priv->mm.phys_objs[id - 1] = phys_obj;
 -
 -      return 0;
 -kfree_obj:
 -      kfree(phys_obj);
 -      return ret;
 -}
 -
 -static void i915_gem_free_phys_object(struct drm_device *dev, int id)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_i915_gem_phys_object *phys_obj;
 -
 -      if (!dev_priv->mm.phys_objs[id - 1])
 -              return;
 -
 -      phys_obj = dev_priv->mm.phys_objs[id - 1];
 -      if (phys_obj->cur_obj) {
 -              i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
 -      }
 -
 -#ifdef CONFIG_X86
 -      set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
 -#endif
 -      drm_pci_free(dev, phys_obj->handle);
 -      kfree(phys_obj);
 -      dev_priv->mm.phys_objs[id - 1] = NULL;
 -}
 -
 -void i915_gem_free_all_phys_object(struct drm_device *dev)
 -{
 -      int i;
 -
 -      for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
 -              i915_gem_free_phys_object(dev, i);
 -}
 -
 -void i915_gem_detach_phys_object(struct drm_device *dev,
 -                               struct drm_i915_gem_object *obj)
 -{
 -      struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
 -      char *vaddr;
 -      int i;
 -      int page_count;
 -
 -      if (!obj->phys_obj)
 -              return;
 -      vaddr = obj->phys_obj->handle->vaddr;
 -
 -      page_count = obj->base.size / PAGE_SIZE;
 -      for (i = 0; i < page_count; i++) {
 -              struct page *page = shmem_read_mapping_page(mapping, i);
 -              if (!IS_ERR(page)) {
 -                      char *dst = kmap_atomic(page);
 -                      memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
 -                      kunmap_atomic(dst);
 -
 -                      drm_clflush_pages(&page, 1);
 -
 -                      set_page_dirty(page);
 -                      mark_page_accessed(page);
 -                      page_cache_release(page);
 -              }
 -      }
 -      i915_gem_chipset_flush(dev);
 -
 -      obj->phys_obj->cur_obj = NULL;
 -      obj->phys_obj = NULL;
 -}
 -
 -int
 -i915_gem_attach_phys_object(struct drm_device *dev,
 -                          struct drm_i915_gem_object *obj,
 -                          int id,
 -                          int align)
 -{
 -      struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -      int ret = 0;
 -      int page_count;
 -      int i;
 -
 -      if (id > I915_MAX_PHYS_OBJECT)
 -              return -EINVAL;
 -
 -      if (obj->phys_obj) {
 -              if (obj->phys_obj->id == id)
 -                      return 0;
 -              i915_gem_detach_phys_object(dev, obj);
 -      }
 -
 -      /* create a new object */
 -      if (!dev_priv->mm.phys_objs[id - 1]) {
 -              ret = i915_gem_init_phys_object(dev, id,
 -                                              obj->base.size, align);
 -              if (ret) {
 -                      DRM_ERROR("failed to init phys object %d size: %zu\n",
 -                                id, obj->base.size);
 -                      return ret;
 -              }
 -      }
 -
 -      /* bind to the object */
 -      obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
 -      obj->phys_obj->cur_obj = obj;
 -
 -      page_count = obj->base.size / PAGE_SIZE;
 -
 -      for (i = 0; i < page_count; i++) {
 -              struct page *page;
 -              char *dst, *src;
 -
 -              page = shmem_read_mapping_page(mapping, i);
 -              if (IS_ERR(page))
 -                      return PTR_ERR(page);
 -
 -              src = kmap_atomic(page);
 -              dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 -              memcpy(dst, src, PAGE_SIZE);
 -              kunmap_atomic(src);
 -
 -              mark_page_accessed(page);
 -              page_cache_release(page);
 -      }
 -
 -      return 0;
 -}
 -
 -static int
 -i915_gem_phys_pwrite(struct drm_device *dev,
 -                   struct drm_i915_gem_object *obj,
 -                   struct drm_i915_gem_pwrite *args,
 -                   struct drm_file *file_priv)
 -{
 -      void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
 -      char __user *user_data = to_user_ptr(args->data_ptr);
 -
 -      if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
 -              unsigned long unwritten;
 -
 -              /* The physical object once assigned is fixed for the lifetime
 -               * of the obj, so we can safely drop the lock and continue
 -               * to access vaddr.
 -               */
 -              mutex_unlock(&dev->struct_mutex);
 -              unwritten = copy_from_user(vaddr, user_data, args->size);
 -              mutex_lock(&dev->struct_mutex);
 -              if (unwritten)
 -                      return -EFAULT;
 -      }
 -
 -      i915_gem_chipset_flush(dev);
 -      return 0;
 -}
 -
  void i915_gem_release(struct drm_device *dev, struct drm_file *file)
  {
        struct drm_i915_file_private *file_priv = file->driver_priv;
diff --cc drivers/gpu/drm/i915/i915_gem_gtt.c
index 931b906f292a95e12d0884cc85d0650f8904d962,f3e23e15c46d5f29a505e144fb63384f6ff89bde..eec820aec0224a975325a439c350e57036bd5dc9
@@@ -35,35 -35,25 +35,35 @@@ static void chv_setup_private_ppat(stru
  
  bool intel_enable_ppgtt(struct drm_device *dev, bool full)
  {
 -      if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
 +      if (i915.enable_ppgtt == 0)
                return false;
  
        if (i915.enable_ppgtt == 1 && full)
                return false;
  
 +      return true;
 +}
 +
 +static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 +{
 +      if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
 +              return 0;
 +
 +      if (enable_ppgtt == 1)
 +              return 1;
 +
 +      if (enable_ppgtt == 2 && HAS_PPGTT(dev))
 +              return 2;
 +
  #ifdef CONFIG_INTEL_IOMMU
        /* Disable ppgtt on SNB if VT-d is on. */
        if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
                DRM_INFO("Disabling PPGTT because VT-d is on\n");
 -              return false;
 +              return 0;
        }
  #endif
  
 -      /* Full ppgtt disabled by default for now due to issues. */
 -      if (full)
 -              return HAS_PPGTT(dev) && (i915.enable_ppgtt == 2);
 -      else
 -              return HAS_ALIASING_PPGTT(dev);
 +      return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
  }
  
  
@@@ -1049,9 -1039,7 +1049,9 @@@ alloc
        if (ret == -ENOSPC && !retried) {
                ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
                                               GEN6_PD_SIZE, GEN6_PD_ALIGN,
 -                                             I915_CACHE_NONE, 0);
 +                                             I915_CACHE_NONE,
 +                                             0, dev_priv->gtt.base.total,
 +                                             0);
                if (ret)
                        return ret;
  
@@@ -1775,6 -1763,13 +1775,13 @@@ static inline unsigned int gen8_get_tot
        bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
        if (bdw_gmch_ctl)
                bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+ #ifdef CONFIG_X86_32
+       /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
+       if (bdw_gmch_ctl > 4)
+               bdw_gmch_ctl = 4;
+ #endif
        return bdw_gmch_ctl << 20;
  }
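A worked example of the clamp above, following its comment: gen8_get_total_gtt_size() returns the size of the GGTT PTE table, so capping the GGMS-derived value at 4 yields a 4 MiB table; with 8-byte gen8 PTEs each mapping a 4 KiB page, that is (4 << 20) / 8 * 4096 = 2 GiB of GGTT address space, which is as much as a 32-bit kernel can reasonably handle.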
  
@@@ -2064,14 -2059,6 +2071,14 @@@ int i915_gem_gtt_init(struct drm_devic
        if (intel_iommu_gfx_mapped)
                DRM_INFO("VT-d active for gfx access\n");
  #endif
 +      /*
 +       * i915.enable_ppgtt is read-only, so do an early pass to validate the
 +       * user's requested state against the hardware/driver capabilities.  We
 +       * do this now so that we can print out any log messages once rather
 +       * than every time we check intel_enable_ppgtt().
 +       */
 +      i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
 +      DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
  
        return 0;
  }
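Taken together with the sanitize_enable_ppgtt() hunk above, the module parameter is resolved exactly once here, so intel_enable_ppgtt() reduces to simple comparisons. A sketch of the resulting mapping, based only on the code shown in this file:

/*
 * enable_ppgtt == 0, or no aliasing PPGTT     -> 0 (disabled)
 * enable_ppgtt == 1                           -> 1 (aliasing PPGTT)
 * enable_ppgtt == 2 && HAS_PPGTT(dev)         -> 2 (full PPGTT)
 * otherwise (auto): SNB with VT-d active      -> 0
 *                   else                      -> HAS_ALIASING_PPGTT(dev) ? 1 : 0
 */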
diff --cc drivers/gpu/drm/i915/intel_bios.c
index 6b6509656f16f6e18c2d8623a21887d26865621b,3d43da6d34824636afdcd0f0998852f66eabfb62..1ee98f121a00fbe4be26cf60187d9b7a81983caf
@@@ -598,71 -598,47 +598,71 @@@ parse_edp(struct drm_i915_private *dev_
  
        dev_priv->vbt.edp_pps = *edp_pps;
  
 -      dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
 -              DP_LINK_BW_1_62;
 +      switch (edp_link_params->rate) {
 +      case EDP_RATE_1_62:
 +              dev_priv->vbt.edp_rate = DP_LINK_BW_1_62;
 +              break;
 +      case EDP_RATE_2_7:
 +              dev_priv->vbt.edp_rate = DP_LINK_BW_2_7;
 +              break;
 +      default:
 +              DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
 +                            edp_link_params->rate);
 +              break;
 +      }
 +
        switch (edp_link_params->lanes) {
 -      case 0:
 +      case EDP_LANE_1:
                dev_priv->vbt.edp_lanes = 1;
                break;
 -      case 1:
 +      case EDP_LANE_2:
                dev_priv->vbt.edp_lanes = 2;
                break;
 -      case 3:
 -      default:
 +      case EDP_LANE_4:
                dev_priv->vbt.edp_lanes = 4;
                break;
 +      default:
 +              DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
 +                            edp_link_params->lanes);
 +              break;
        }
 +
        switch (edp_link_params->preemphasis) {
 -      case 0:
 +      case EDP_PREEMPHASIS_NONE:
                dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
                break;
 -      case 1:
 +      case EDP_PREEMPHASIS_3_5dB:
                dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
                break;
 -      case 2:
 +      case EDP_PREEMPHASIS_6dB:
                dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
                break;
 -      case 3:
 +      case EDP_PREEMPHASIS_9_5dB:
                dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
                break;
 +      default:
 +              DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
 +                            edp_link_params->preemphasis);
 +              break;
        }
 +
        switch (edp_link_params->vswing) {
 -      case 0:
 +      case EDP_VSWING_0_4V:
                dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
                break;
 -      case 1:
 +      case EDP_VSWING_0_6V:
                dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
                break;
 -      case 2:
 +      case EDP_VSWING_0_8V:
                dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
                break;
 -      case 3:
 +      case EDP_VSWING_1_2V:
                dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
                break;
 +      default:
 +              DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
 +                            edp_link_params->vswing);
 +              break;
        }
  }
  
@@@ -744,6 -720,10 +744,10 @@@ parse_mipi(struct drm_i915_private *dev
        int i, panel_id, seq_size;
        u16 block_size;
  
+       /* parse MIPI blocks only if LFP type is MIPI */
+       if (!dev_priv->vbt.has_mipi)
+               return;
        /* Initialize this to undefined indicating no generic MIPI support */
        dev_priv->vbt.dsi.panel_id = MIPI_DSI_UNDEFINED_PANEL_ID;
  
@@@ -1059,6 -1039,15 +1063,15 @@@ parse_device_mapping(struct drm_i915_pr
                        /* skip the device block if device type is invalid */
                        continue;
                }
+               if (p_child->common.dvo_port >= DVO_PORT_MIPIA
+                   && p_child->common.dvo_port <= DVO_PORT_MIPID
+                   &&p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT) {
+                       DRM_DEBUG_KMS("Found MIPI as LFP\n");
+                       dev_priv->vbt.has_mipi = 1;
+                       dev_priv->vbt.dsi.port = p_child->common.dvo_port;
+               }
                child_dev_ptr = dev_priv->vbt.child_dev + count;
                count++;
                memcpy((void *)child_dev_ptr, (void *)p_child,
diff --cc drivers/gpu/drm/i915/intel_display.c
index 7a4c7c98378a91901c7bea3c7cbd966a26bebd2e,98b704998d816b1028aeb546823b363ae07d8764..efd3cf50cb0f6a647d670a0f2a7b886b4e1d0b8a
@@@ -1484,14 -1484,6 +1484,6 @@@ static void intel_reset_dpio(struct drm
        if (!IS_VALLEYVIEW(dev))
                return;
  
-       /*
-        * Enable the CRI clock source so we can get at the display and the
-        * reference clock for VGA hotplug / manual detection.
-        */
-       I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-                  DPLL_REFA_CLK_ENABLE_VLV |
-                  DPLL_INTEGRATED_CRI_CLK_VLV);
        if (IS_CHERRYVIEW(dev)) {
                enum dpio_phy phy;
                u32 val;
  
        } else {
                /*
-                * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
-                *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
-                *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
-                *   b. The other bits such as sfr settings / modesel may all
-                *      be set to 0.
-                *
-                * This should only be done on init and resume from S3 with
-                * both PLLs disabled, or we risk losing DPIO and PLL
-                * synchronization.
+                * If DPIO has already been reset, e.g. by BIOS, just skip all
+                * this.
                 */
-               I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+               if (I915_READ(DPIO_CTL) & DPIO_CMNRST)
+                       return;
+               /*
+                * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+                * Need to assert and de-assert PHY SB reset by gating the
+                * common lane power, then un-gating it.
+                * Simply ungating isn't enough to reset the PHY enough to get
+                * ports and lanes running.
+                */
+               __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
+                                    false);
+               __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
+                                    true);
        }
  }
  
@@@ -7868,29 -7866,33 +7866,33 @@@ static void i845_update_cursor(struct d
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       bool visible = base != 0;
-       u32 cntl;
+       uint32_t cntl;
  
-       if (intel_crtc->cursor_visible == visible)
-               return;
-       cntl = I915_READ(_CURACNTR);
-       if (visible) {
+       if (base != intel_crtc->cursor_base) {
                /* On these chipsets we can only modify the base whilst
                 * the cursor is disabled.
                 */
+               if (intel_crtc->cursor_cntl) {
+                       I915_WRITE(_CURACNTR, 0);
+                       POSTING_READ(_CURACNTR);
+                       intel_crtc->cursor_cntl = 0;
+               }
                I915_WRITE(_CURABASE, base);
+               POSTING_READ(_CURABASE);
+       }
  
-               cntl &= ~(CURSOR_FORMAT_MASK);
-               /* XXX width must be 64, stride 256 => 0x00 << 28 */
-               cntl |= CURSOR_ENABLE |
+       /* XXX width must be 64, stride 256 => 0x00 << 28 */
+       cntl = 0;
+       if (base)
+               cntl = (CURSOR_ENABLE |
                        CURSOR_GAMMA_ENABLE |
-                       CURSOR_FORMAT_ARGB;
-       } else
-               cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
-       I915_WRITE(_CURACNTR, cntl);
-       intel_crtc->cursor_visible = visible;
+                       CURSOR_FORMAT_ARGB);
+       if (intel_crtc->cursor_cntl != cntl) {
+               I915_WRITE(_CURACNTR, cntl);
+               POSTING_READ(_CURACNTR);
+               intel_crtc->cursor_cntl = cntl;
+       }
  }
  
  static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
-       bool visible = base != 0;
-       if (intel_crtc->cursor_visible != visible) {
-               int16_t width = intel_crtc->cursor_width;
-               uint32_t cntl = I915_READ(CURCNTR(pipe));
-               if (base) {
-                       cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
-                       cntl |= MCURSOR_GAMMA_ENABLE;
+       uint32_t cntl;
  
-                       switch (width) {
+       cntl = 0;
+       if (base) {
+               cntl = MCURSOR_GAMMA_ENABLE;
+               switch (intel_crtc->cursor_width) {
                        case 64:
                                cntl |= CURSOR_MODE_64_ARGB_AX;
                                break;
                        default:
                                WARN_ON(1);
                                return;
-                       }
-                       cntl |= pipe << 28; /* Connect to correct pipe */
-               } else {
-                       cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
-                       cntl |= CURSOR_MODE_DISABLE;
                }
+               cntl |= pipe << 28; /* Connect to correct pipe */
+       }
+       if (intel_crtc->cursor_cntl != cntl) {
                I915_WRITE(CURCNTR(pipe), cntl);
-               intel_crtc->cursor_visible = visible;
+               POSTING_READ(CURCNTR(pipe));
+               intel_crtc->cursor_cntl = cntl;
        }
        /* and commit changes on next vblank */
-       POSTING_READ(CURCNTR(pipe));
        I915_WRITE(CURBASE(pipe), base);
        POSTING_READ(CURBASE(pipe));
  }
@@@ -7943,15 -7939,12 +7939,12 @@@ static void ivb_update_cursor(struct dr
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
-       bool visible = base != 0;
-       if (intel_crtc->cursor_visible != visible) {
-               int16_t width = intel_crtc->cursor_width;
-               uint32_t cntl = I915_READ(CURCNTR(pipe));
-               if (base) {
-                       cntl &= ~CURSOR_MODE;
-                       cntl |= MCURSOR_GAMMA_ENABLE;
-                       switch (width) {
+       uint32_t cntl;
+       cntl = 0;
+       if (base) {
+               cntl = MCURSOR_GAMMA_ENABLE;
+               switch (intel_crtc->cursor_width) {
                        case 64:
                                cntl |= CURSOR_MODE_64_ARGB_AX;
                                break;
                        default:
                                WARN_ON(1);
                                return;
-                       }
-               } else {
-                       cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
-                       cntl |= CURSOR_MODE_DISABLE;
                }
-               if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
-                       cntl |= CURSOR_PIPE_CSC_ENABLE;
-                       cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
-               }
-               I915_WRITE(CURCNTR(pipe), cntl);
+       }
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               cntl |= CURSOR_PIPE_CSC_ENABLE;
  
-               intel_crtc->cursor_visible = visible;
+       if (intel_crtc->cursor_cntl != cntl) {
+               I915_WRITE(CURCNTR(pipe), cntl);
+               POSTING_READ(CURCNTR(pipe));
+               intel_crtc->cursor_cntl = cntl;
        }
        /* and commit changes on next vblank */
-       POSTING_READ(CURCNTR(pipe));
        I915_WRITE(CURBASE(pipe), base);
        POSTING_READ(CURBASE(pipe));
  }
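The three cursor hunks above drop the cursor_visible bool in favour of caching the last value written to the cursor control register in intel_crtc->cursor_cntl (initialised to ~0 in intel_crtc_init() below), so the register is only rewritten, and posted, when the value actually changes. A generic sketch of that pattern, assuming the usual I915_WRITE/POSTING_READ macros (the helper itself is illustrative, not part of this series):

/* Illustrative helper, not from this series. */
static void example_write_if_changed(struct drm_i915_private *dev_priv,
                                     u32 reg, u32 *cached, u32 val)
{
        if (*cached == val)
                return;

        I915_WRITE(reg, val);
        POSTING_READ(reg);      /* flush before the next register write */
        *cached = val;
}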
@@@ -7994,7 -7984,6 +7984,6 @@@ static void intel_crtc_update_cursor(st
        int x = intel_crtc->cursor_x;
        int y = intel_crtc->cursor_y;
        u32 base = 0, pos = 0;
-       bool visible;
  
        if (on)
                base = intel_crtc->cursor_addr;
        }
        pos |= y << CURSOR_Y_SHIFT;
  
-       visible = base != 0;
-       if (!visible && !intel_crtc->cursor_visible)
+       if (base == 0 && intel_crtc->cursor_base == 0)
                return;
  
        I915_WRITE(CURPOS(pipe), pos);
                i845_update_cursor(crtc, base);
        else
                i9xx_update_cursor(crtc, base);
+       intel_crtc->cursor_base = base;
  }
  
  static int intel_crtc_cursor_set(struct drm_crtc *crtc,
                addr = i915_gem_obj_ggtt_offset(obj);
        } else {
                int align = IS_I830(dev) ? 16 * 1024 : 256;
 -              ret = i915_gem_attach_phys_object(dev, obj,
 -                                                (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
 -                                                align);
 +              ret = i915_gem_object_attach_phys(obj, align);
                if (ret) {
                        DRM_DEBUG_KMS("failed to attach phys object\n");
                        goto fail_locked;
                }
 -              addr = obj->phys_obj->handle->busaddr;
 +              addr = obj->phys_handle->busaddr;
        }
  
        if (IS_GEN2(dev))
  
   finish:
        if (intel_crtc->cursor_bo) {
 -              if (INTEL_INFO(dev)->cursor_needs_physical) {
 -                      if (intel_crtc->cursor_bo != obj)
 -                              i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
 -              } else
 +              if (!INTEL_INFO(dev)->cursor_needs_physical)
                        i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
                drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
        }
@@@ -10990,6 -10984,9 +10979,9 @@@ static void intel_crtc_init(struct drm_
                intel_crtc->plane = !pipe;
        }
  
+       intel_crtc->cursor_base = ~0;
+       intel_crtc->cursor_cntl = ~0;
        init_waitqueue_head(&intel_crtc->vbl_wait);
  
        BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
@@@ -11103,7 -11100,7 +11095,7 @@@ static void intel_setup_outputs(struct 
  
        intel_lvds_init(dev);
  
-       if (!IS_ULT(dev) && !IS_CHERRYVIEW(dev))
+       if (!IS_ULT(dev) && !IS_CHERRYVIEW(dev) && dev_priv->vbt.int_crt_support)
                intel_crt_init(dev);
  
        if (HAS_DDI(dev)) {
@@@ -11618,9 -11615,6 +11610,6 @@@ static struct intel_quirk intel_quirks[
        /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
        { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
  
-       /* 830 needs to leave pipe A & dpll A up */
-       { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
        /* Lenovo U160 cannot use SSC on LVDS */
        { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
  
@@@ -11803,6 -11797,15 +11792,6 @@@ void intel_modeset_init(struct drm_devi
        }
  }
  
 -static void
 -intel_connector_break_all_links(struct intel_connector *connector)
 -{
 -      connector->base.dpms = DRM_MODE_DPMS_OFF;
 -      connector->base.encoder = NULL;
 -      connector->encoder->connectors_active = false;
 -      connector->encoder->base.crtc = NULL;
 -}
 -
  static void intel_enable_pipe_a(struct drm_device *dev)
  {
        struct intel_connector *connector;
@@@ -11891,17 -11894,8 +11880,17 @@@ static void intel_sanitize_crtc(struct 
                        if (connector->encoder->base.crtc != &crtc->base)
                                continue;
  
 -                      intel_connector_break_all_links(connector);
 +                      connector->base.dpms = DRM_MODE_DPMS_OFF;
 +                      connector->base.encoder = NULL;
                }
 +              /* multiple connectors may have the same encoder:
 +               *  handle them and break crtc link separately */
 +              list_for_each_entry(connector, &dev->mode_config.connector_list,
 +                                  base.head)
 +                      if (connector->encoder->base.crtc == &crtc->base) {
 +                              connector->encoder->base.crtc = NULL;
 +                              connector->encoder->connectors_active = false;
 +                      }
  
                WARN_ON(crtc->active);
                crtc->base.enabled = false;
@@@ -11992,8 -11986,6 +11981,8 @@@ static void intel_sanitize_encoder(stru
                                      encoder->base.name);
                        encoder->disable(encoder);
                }
 +              encoder->base.crtc = NULL;
 +              encoder->connectors_active = false;
  
                /* Inconsistent output/port/pipe state happens presumably due to
                 * a bug in one of the get_hw_state functions. Or someplace else
                                    base.head) {
                        if (connector->encoder != encoder)
                                continue;
 -
 -                      intel_connector_break_all_links(connector);
 +                      connector->base.dpms = DRM_MODE_DPMS_OFF;
 +                      connector->base.encoder = NULL;
                }
        }
        /* Enabled encoders without active connectors will be fixed in
diff --cc drivers/gpu/drm/i915/intel_pm.c
index 906d06f73e51f5d96d82c03a234c949876c34fae,8bbd4f910663959b5c6cb7595d7dc6c167e254b8..d1e53abec1b5f808fbf5457f565f2343689e41f6
@@@ -2113,43 -2113,6 +2113,43 @@@ static void intel_print_wm_latency(stru
        }
  }
  
 +static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
 +                                  uint16_t wm[5], uint16_t min)
 +{
 +      int level, max_level = ilk_wm_max_level(dev_priv->dev);
 +
 +      if (wm[0] >= min)
 +              return false;
 +
 +      wm[0] = max(wm[0], min);
 +      for (level = 1; level <= max_level; level++)
 +              wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
 +
 +      return true;
 +}
 +
 +static void snb_wm_latency_quirk(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      bool changed;
 +
 +      /*
 +       * The BIOS provided WM memory latency values are often
 +       * inadequate for high resolution displays. Adjust them.
 +       */
 +      changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
 +              ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
 +              ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
 +
 +      if (!changed)
 +              return;
 +
 +      DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
 +      intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
 +      intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
 +      intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
 +}
 +
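A worked example of the quirk above: with BIOS-reported latencies of {5, 2, 2, 2, 2} and min = 12, wm[0] is below the minimum, so it is raised to 12 and every higher level is raised to at least DIV_ROUND_UP(12, 5) = 3, giving {12, 3, 3, 3, 3}; the adjusted values are then reported through intel_print_wm_latency().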
  static void ilk_setup_wm_latency(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
        intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
        intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
 +
 +      if (IS_GEN6(dev))
 +              snb_wm_latency_quirk(dev);
  }
  
  static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
@@@ -5388,8 -5348,11 +5388,11 @@@ static void valleyview_init_clock_gatin
        I915_WRITE(GEN6_UCGCTL2,
                   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
  
-       /* WaDisableL3Bank2xClockGate:vlv */
-       I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
+       /* WaDisableL3Bank2xClockGate:vlv
+        * Disabling L3 clock gating- MMIO 940c[25] = 1
+        * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
+       I915_WRITE(GEN7_UCGCTL4,
+                  I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
  
        I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
  
@@@ -5541,6 -5504,12 +5544,12 @@@ static void gen3_init_clock_gating(stru
  
        /* IIR "flip pending" means done if this bit is set */
        I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+       /* interrupts should cause a wake up from C3 */
+       I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
+       /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
+       I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
  }
  
  static void i85x_init_clock_gating(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+       /* interrupts should cause a wake up from C3 */
+       I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
+                  _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
  }
  
  static void i830_init_clock_gating(struct drm_device *dev)
@@@ -5599,10 -5572,25 +5612,25 @@@ bool intel_display_power_enabled_sw(str
                                    enum intel_display_power_domain domain)
  {
        struct i915_power_domains *power_domains;
+       struct i915_power_well *power_well;
+       bool is_enabled;
+       int i;
+       if (dev_priv->pm.suspended)
+               return false;
  
        power_domains = &dev_priv->power_domains;
+       is_enabled = true;
+       for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+               if (power_well->always_on)
+                       continue;
  
-       return power_domains->domain_use_count[domain];
+               if (!power_well->count) {
+                       is_enabled = false;
+                       break;
+               }
+       }
+       return is_enabled;
  }
  
  bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
@@@ -5745,13 -5733,34 +5773,34 @@@ static bool i9xx_always_on_power_well_e
        return true;
  }
  
- static void vlv_set_power_well(struct drm_i915_private *dev_priv,
-                              struct i915_power_well *power_well, bool enable)
+ void __vlv_set_power_well(struct drm_i915_private *dev_priv,
+                         enum punit_power_well power_well_id, bool enable)
  {
-       enum punit_power_well power_well_id = power_well->data;
+       struct drm_device *dev = dev_priv->dev;
        u32 mask;
        u32 state;
        u32 ctrl;
+       enum pipe pipe;
+       if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+               if (enable) {
+                       /*
+                        * Enable the CRI clock source so we can get at the
+                        * display and the reference clock for VGA
+                        * hotplug / manual detection.
+                        */
+                       I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+                                  DPLL_REFA_CLK_ENABLE_VLV |
+                                  DPLL_INTEGRATED_CRI_CLK_VLV);
+                       udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+               } else {
+                       for_each_pipe(pipe)
+                               assert_pll_disabled(dev_priv, pipe);
+                       /* Assert common reset */
+                       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) &
+                                  ~DPIO_CMNRST);
+               }
+       }
  
        mask = PUNIT_PWRGT_MASK(power_well_id);
        state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
  
  out:
        mutex_unlock(&dev_priv->rps.hw_lock);
+       /*
+        * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+        *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
+        *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
+        *   b. The other bits such as sfr settings / modesel may all
+        *      be set to 0.
+        *
+        * This should only be done on init and resume from S3 with
+        * both PLLs disabled, or we risk losing DPIO and PLL
+        * synchronization.
+        */
+       if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable)
+               I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+ }
+ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+                              struct i915_power_well *power_well, bool enable)
+ {
+       enum punit_power_well power_well_id = power_well->data;
+       __vlv_set_power_well(dev_priv, power_well_id, enable);
  }
  
  static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@@ -6113,12 -6144,6 +6184,6 @@@ static struct i915_power_well vlv_power
                .data = PUNIT_POWER_WELL_DISP2D,
                .ops = &vlv_display_power_well_ops,
        },
-       {
-               .name = "dpio-common",
-               .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
-               .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
-               .ops = &vlv_dpio_power_well_ops,
-       },
        {
                .name = "dpio-tx-b-01",
                .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
                .ops = &vlv_dpio_power_well_ops,
                .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
        },
+       {
+               .name = "dpio-common",
+               .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
+               .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
+               .ops = &vlv_dpio_power_well_ops,
+       },
  };
  
  #define set_power_wells(power_domains, __power_wells) ({              \
diff --cc drivers/gpu/drm/i915/intel_uncore.c
index 2f5d5d3f004346fa2ab9dcc18316e09875cb501e,a19bceccb287acfd9b49599b3955e455f9f922c9..79cba593df0d33dd1bb1fe6732e94d62ec39fe73
@@@ -185,8 -185,6 +185,8 @@@ static void vlv_force_wake_reset(struc
  {
        __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
                           _MASKED_BIT_DISABLE(0xffff));
 +      __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
 +                         _MASKED_BIT_DISABLE(0xffff));
        /* something from same cacheline, but !FORCEWAKE_VLV */
        __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
  }
@@@ -393,26 -391,8 +393,8 @@@ void intel_uncore_early_sanitize(struc
  
  void intel_uncore_sanitize(struct drm_device *dev)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 reg_val;
        /* BIOS often leaves RC6 enabled, but disable it for hw init */
        intel_disable_gt_powersave(dev);
-       /* Turn off power gate, require especially for the BIOS less system */
-       if (IS_VALLEYVIEW(dev)) {
-               mutex_lock(&dev_priv->rps.hw_lock);
-               reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
-               if (reg_val & (PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_RENDER) |
-                              PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_MEDIA) |
-                              PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_DISP2D)))
-                       vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
-               mutex_unlock(&dev_priv->rps.hw_lock);
-       }
  }
  
  /*
@@@ -967,6 -947,9 +949,9 @@@ static int i965_do_reset(struct drm_dev
  {
        int ret;
  
+       /* FIXME: i965g/gm need a display save/restore for gpu reset. */
+       return -ENODEV;
        /*
         * Set the domains we want to reset (GRDOM/bits 2 and 3) as
         * well as the reset bit (GR/bit 0).  Setting the GR bit