Merge tag 'drm-intel-next-2014-02-07' of ssh://git.freedesktop.org/git/drm-intel...
author Dave Airlie <airlied@redhat.com>
Thu, 27 Feb 2014 04:36:01 +0000 (14:36 +1000)
committer Dave Airlie <airlied@redhat.com>
Thu, 27 Feb 2014 04:36:01 +0000 (14:36 +1000)
- Yet more steps towards atomic modeset from Ville.
- DP panel power sequencing improvements from Paulo.
- irq code cleanups from Ville.
- 5.4 GHz dp lane clock support for bdw/hsw from Todd.
- Clock readout support for hsw/bdw (aka fastboot) from Jesse.
- Make pipe underruns report at ERROR level (Ville). This is to check our
  improved watermarks code.
- Full ppgtt support from Ben for gen7 (a sketch of the new enable_ppgtt gating
  follows right after this list).
- More fbc fixes and improvements from Ville all over the place, unfortunately
  not yet enabled by default on more platforms.
- w/a cleanups from Ville.
- HiZ stall optimization settings (Chia-I Wu).
- Display register mmio offset refactor patch from Antti.
- RPS improvements for corner-cases from Jeff McGee.
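
As a quick orientation before the diff: the full-PPGTT work below is gated by the
i915.enable_ppgtt module parameter (0 = off, 1 = aliasing PPGTT only, anything
else allows full PPGTT where the hardware supports it). The following is a
condensed, standalone C sketch of that gating, mirroring the intel_enable_ppgtt()
hunk added to i915_drv.h in this merge; the boolean parameters stand in for the
real HAS_ALIASING_PPGTT()/HAS_PPGTT() macros and the intel_iommu_gfx_mapped
check, so treat it as illustrative rather than the exact kernel code.

#include <stdbool.h>

/* Stand-ins for HAS_ALIASING_PPGTT(dev), HAS_PPGTT(dev) and VT-d state. */
static bool intel_enable_ppgtt_sketch(int enable_ppgtt, bool full, int gen,
                                      bool has_aliasing_ppgtt,
                                      bool has_full_ppgtt, bool vtd_active)
{
        if (enable_ppgtt == 0 || !has_aliasing_ppgtt)
                return false;   /* parameter 0: no PPGTT at all */

        if (enable_ppgtt == 1 && full)
                return false;   /* parameter 1: aliasing PPGTT only */

        /* The merge disables PPGTT on SNB (gen6) when VT-d is active. */
        if (gen == 6 && vtd_active)
                return false;

        return full ? has_full_ppgtt : has_aliasing_ppgtt;
}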

* tag 'drm-intel-next-2014-02-07' of ssh://git.freedesktop.org/git/drm-intel: (166 commits)
  drm/i915: Update rps interrupt limits
  drm/i915: Restore rps/rc6 on reset
  drm/i915: Prevent recursion by retiring requests when the ring is full
  drm/i915: Generate a hang error code
  drm/i915: unify FLIP_DONE macro names
  drm/i915: vlv: s/spin_lock_irqsave/spin_lock/ in irq handler
  drm/i915: factor out valleyview_pipestat_irq_handler
  drm/i915: vlv: don't unmask IIR[DISPLAY_PIPE_A/B_VBLANK] interrupt
  drm/i915: Reorganize display pipe register accesses
  drm/i915: Treat using a purged buffer as a source of EFAULT
  drm/i915: Convert EFAULT into a silent SIGBUS
  drm/i915: release mutex in i915_gem_init()'s error path
  drm/i915: check for oom when allocating private_default_ctx
  drm/i915/vlv: WA to fix Voltage not getting dropped to Vmin when Gfx is power gated.
  drm/i915: Get rid of acthd based guilty batch search
  drm/i915: Use hangcheck score to find guilty context
  drm/i915: Drop WaDisablePSDDualDispatchEnable:ivb for IVB GT2
  drm/i915: Fix IVB GT2 WaDisableDopClockGating and WaDisablePSDDualDispatchEnable
  drm/i915: Don't access snooped pages through the GTT (even for error capture)
  drm/i915: Only print information for filing bug reports once
  ...

Conflicts:
drivers/gpu/drm/i915/intel_dp.c

drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
include/drm/drm_dp_helper.h

index 9fd44f5f3b3b40ff5f1acf497b613a36e153cac2,4850494bd548ffc50e7c3ec838fc0e926d2edac3..f33902ff2c229e70011012a83f085c39fd5a98e0
@@@ -14,6 -14,7 +14,7 @@@ i915-y := i915_drv.o i915_dma.o i915_ir
          i915_gem_gtt.o \
          i915_gem_stolen.o \
          i915_gem_tiling.o \
+         i915_params.o \
          i915_sysfs.o \
          i915_trace_points.o \
          i915_ums.o \
@@@ -37,6 -38,7 +38,6 @@@
          intel_ringbuffer.o \
          intel_overlay.o \
          intel_sprite.o \
 -        intel_opregion.o \
          intel_sideband.o \
          intel_uncore.o \
          dvo_ch7xxx.o \
@@@ -49,7 -51,7 +50,7 @@@
  
  i915-$(CONFIG_COMPAT)   += i915_ioc32.o
  
 -i915-$(CONFIG_ACPI)   += intel_acpi.o
 +i915-$(CONFIG_ACPI)   += intel_acpi.o intel_opregion.o
  
  i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
  
index df77e20e3c3d00ee9173d4c160274f0e837c26de,728b9c3f04212f7b1d99efc65b49e94665a37cea..9d8ca2a36fde7c7ce5c42963e8ba0f81f7c502c5
@@@ -58,7 -58,8 +58,8 @@@ enum pipe 
        PIPE_A = 0,
        PIPE_B,
        PIPE_C,
-       I915_MAX_PIPES
+       _PIPE_EDP,
+       I915_MAX_PIPES = _PIPE_EDP
  };
  #define pipe_name(p) ((p) + 'A')
  
@@@ -66,7 -67,8 +67,8 @@@ enum transcoder 
        TRANSCODER_A = 0,
        TRANSCODER_B,
        TRANSCODER_C,
-       TRANSCODER_EDP = 0xF,
+       TRANSCODER_EDP,
+       I915_MAX_TRANSCODERS
  };
  #define transcoder_name(t) ((t) + 'A')
  
@@@ -295,53 -297,80 +297,80 @@@ struct intel_display_error_state
  
  struct drm_i915_error_state {
        struct kref ref;
+       struct timeval time;
+       /* Generic register state */
        u32 eir;
        u32 pgtbl_er;
        u32 ier;
        u32 ccid;
        u32 derrmr;
        u32 forcewake;
-       bool waiting[I915_NUM_RINGS];
-       u32 pipestat[I915_MAX_PIPES];
-       u32 tail[I915_NUM_RINGS];
-       u32 head[I915_NUM_RINGS];
-       u32 ctl[I915_NUM_RINGS];
-       u32 ipeir[I915_NUM_RINGS];
-       u32 ipehr[I915_NUM_RINGS];
-       u32 instdone[I915_NUM_RINGS];
-       u32 acthd[I915_NUM_RINGS];
-       u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
-       u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
-       u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
-       /* our own tracking of ring head and tail */
-       u32 cpu_ring_head[I915_NUM_RINGS];
-       u32 cpu_ring_tail[I915_NUM_RINGS];
        u32 error; /* gen6+ */
        u32 err_int; /* gen7 */
-       u32 bbstate[I915_NUM_RINGS];
-       u32 instpm[I915_NUM_RINGS];
-       u32 instps[I915_NUM_RINGS];
-       u32 extra_instdone[I915_NUM_INSTDONE_REG];
-       u32 seqno[I915_NUM_RINGS];
-       u64 bbaddr[I915_NUM_RINGS];
-       u32 fault_reg[I915_NUM_RINGS];
        u32 done_reg;
-       u32 faddr[I915_NUM_RINGS];
+       u32 gac_eco;
+       u32 gam_ecochk;
+       u32 gab_ctl;
+       u32 gfx_mode;
+       u32 extra_instdone[I915_NUM_INSTDONE_REG];
+       u32 pipestat[I915_MAX_PIPES];
        u64 fence[I915_MAX_NUM_FENCES];
-       struct timeval time;
+       struct intel_overlay_error_state *overlay;
+       struct intel_display_error_state *display;
        struct drm_i915_error_ring {
                bool valid;
+               /* Software tracked state */
+               bool waiting;
+               int hangcheck_score;
+               enum intel_ring_hangcheck_action hangcheck_action;
+               int num_requests;
+               /* our own tracking of ring head and tail */
+               u32 cpu_ring_head;
+               u32 cpu_ring_tail;
+               u32 semaphore_seqno[I915_NUM_RINGS - 1];
+               /* Register state */
+               u32 tail;
+               u32 head;
+               u32 ctl;
+               u32 hws;
+               u32 ipeir;
+               u32 ipehr;
+               u32 instdone;
+               u32 acthd;
+               u32 bbstate;
+               u32 instpm;
+               u32 instps;
+               u32 seqno;
+               u64 bbaddr;
+               u32 fault_reg;
+               u32 faddr;
+               u32 rc_psmi; /* sleep state */
+               u32 semaphore_mboxes[I915_NUM_RINGS - 1];
                struct drm_i915_error_object {
                        int page_count;
                        u32 gtt_offset;
                        u32 *pages[0];
-               } *ringbuffer, *batchbuffer, *ctx;
+               } *ringbuffer, *batchbuffer, *ctx, *hws_page;
                struct drm_i915_error_request {
                        long jiffies;
                        u32 seqno;
                        u32 tail;
                } *requests;
-               int num_requests;
+               struct {
+                       u32 gfx_mode;
+                       union {
+                               u64 pdp[4];
+                               u32 pp_dir_base;
+                       };
+               } vm_info;
        } ring[I915_NUM_RINGS];
        struct drm_i915_error_buffer {
                u32 size;
                s32 ring:4;
                u32 cache_level:3;
        } **active_bo, **pinned_bo;
        u32 *active_bo_count, *pinned_bo_count;
-       struct intel_overlay_error_state *overlay;
-       struct intel_display_error_state *display;
-       int hangcheck_score[I915_NUM_RINGS];
-       enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
  };
  
  struct intel_connector;
@@@ -507,6 -533,12 +533,12 @@@ struct intel_device_info 
        u8 gen;
        u8 ring_mask; /* Rings supported by the HW */
        DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+       /* Register offsets for the various display pipes and transcoders */
+       int pipe_offsets[I915_MAX_TRANSCODERS];
+       int trans_offsets[I915_MAX_TRANSCODERS];
+       int dpll_offsets[I915_MAX_PIPES];
+       int dpll_md_offsets[I915_MAX_PIPES];
+       int palette_offsets[I915_MAX_PIPES];
  };
  
  #undef DEFINE_FLAG
@@@ -524,6 -556,57 +556,57 @@@ enum i915_cache_level 
  
  typedef uint32_t gen6_gtt_pte_t;
  
+ /**
+  * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+  * VMA's presence cannot be guaranteed before binding, or after unbinding the
+  * object into/from the address space.
+  *
+  * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+  * will always be <= an object's lifetime. So object refcounting should cover us.
+  */
+ struct i915_vma {
+       struct drm_mm_node node;
+       struct drm_i915_gem_object *obj;
+       struct i915_address_space *vm;
+       /** This object's place on the active/inactive lists */
+       struct list_head mm_list;
+       struct list_head vma_link; /* Link in the object's VMA list */
+       /** This vma's place in the batchbuffer or on the eviction list */
+       struct list_head exec_list;
+       /**
+        * Used for performing relocations during execbuffer insertion.
+        */
+       struct hlist_node exec_node;
+       unsigned long exec_handle;
+       struct drm_i915_gem_exec_object2 *exec_entry;
+       /**
+        * How many users have pinned this object in GTT space. The following
+        * users can each hold at most one reference: pwrite/pread, pin_ioctl
+        * (via user_pin_count), execbuffer (objects are not allowed multiple
+        * times for the same batchbuffer), and the framebuffer code. When
+        * switching/pageflipping, the framebuffer code has at most two buffers
+        * pinned per crtc.
+        *
+        * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+        * bits with absolutely no headroom. So use 4 bits. */
+       unsigned int pin_count:4;
+ #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
+       /** Unmap an object from an address space. This usually consists of
+        * setting the valid PTE entries to a reserved scratch page. */
+       void (*unbind_vma)(struct i915_vma *vma);
+       /* Map an object into an address space with the given cache flags. */
+ #define GLOBAL_BIND (1<<0)
+       void (*bind_vma)(struct i915_vma *vma,
+                        enum i915_cache_level cache_level,
+                        u32 flags);
+ };
  struct i915_address_space {
        struct drm_mm mm;
        struct drm_device *dev;
@@@ -605,6 -688,8 +688,8 @@@ struct i915_gtt 
  
  struct i915_hw_ppgtt {
        struct i915_address_space base;
+       struct kref ref;
+       struct drm_mm_node node;
        unsigned num_pd_entries;
        union {
                struct page **pt_pages;
                dma_addr_t *pt_dma_addr;
                dma_addr_t *gen8_pt_dma_addr[4];
        };
-       int (*enable)(struct drm_device *dev);
- };
- /**
-  * A VMA represents a GEM BO that is bound into an address space. Therefore, a
-  * VMA's presence cannot be guaranteed before binding, or after unbinding the
-  * object into/from the address space.
-  *
-  * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
-  * will always be <= an object's lifetime. So object refcounting should cover us.
-  */
- struct i915_vma {
-       struct drm_mm_node node;
-       struct drm_i915_gem_object *obj;
-       struct i915_address_space *vm;
-       /** This object's place on the active/inactive lists */
-       struct list_head mm_list;
-       struct list_head vma_link; /* Link in the object's VMA list */
-       /** This vma's place in the batchbuffer or on the eviction list */
-       struct list_head exec_list;
-       /**
-        * Used for performing relocations during execbuffer insertion.
-        */
-       struct hlist_node exec_node;
-       unsigned long exec_handle;
-       struct drm_i915_gem_exec_object2 *exec_entry;
  
+       int (*enable)(struct i915_hw_ppgtt *ppgtt);
+       int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
+                        struct intel_ring_buffer *ring,
+                        bool synchronous);
+       void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
  };
  
  struct i915_ctx_hang_stats {
@@@ -676,9 -736,10 +736,10 @@@ struct i915_hw_context 
        bool is_initialized;
        uint8_t remap_slice;
        struct drm_i915_file_private *file_priv;
-       struct intel_ring_buffer *ring;
+       struct intel_ring_buffer *last_ring;
        struct drm_i915_gem_object *obj;
        struct i915_ctx_hang_stats hang_stats;
+       struct i915_address_space *vm;
  
        struct list_head link;
  };
@@@ -831,11 -892,7 +892,7 @@@ struct i915_suspend_saved_registers 
        u32 savePFIT_CONTROL;
        u32 save_palette_a[256];
        u32 save_palette_b[256];
-       u32 saveDPFC_CB_BASE;
-       u32 saveFBC_CFB_BASE;
-       u32 saveFBC_LL_BASE;
        u32 saveFBC_CONTROL;
-       u32 saveFBC_CONTROL2;
        u32 saveIER;
        u32 saveIIR;
        u32 saveIMR;
@@@ -905,8 -962,6 +962,6 @@@ struct intel_gen6_power_mgmt 
        struct work_struct work;
        u32 pm_iir;
  
-       /* The below variables and all the rps hw state are protected by
-        * dev->struct_mutex. */
        u8 cur_delay;
        u8 min_delay;
        u8 max_delay;
        u8 rp0_delay;
        u8 hw_max;
  
+       bool rp_up_masked;
+       bool rp_down_masked;
        int last_adj;
        enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
  
@@@ -1361,8 -1419,6 +1419,6 @@@ typedef struct drm_i915_private 
        drm_dma_handle_t *status_page_dmah;
        struct resource mch_res;
  
-       atomic_t irq_received;
        /* protects the irq masks */
        spinlock_t irq_lock;
  
@@@ -1627,18 -1683,6 +1683,6 @@@ struct drm_i915_gem_object 
         */
        unsigned int fence_dirty:1;
  
-       /** How many users have pinned this object in GTT space. The following
-        * users can each hold at most one reference: pwrite/pread, pin_ioctl
-        * (via user_pin_count), execbuffer (objects are not allowed multiple
-        * times for the same batchbuffer), and the framebuffer code. When
-        * switching/pageflipping, the framebuffer code has at most two buffers
-        * pinned per crtc.
-        *
-        * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
-        * bits with absolutely no headroom. So use 4 bits. */
-       unsigned int pin_count:4;
- #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
        /**
         * Is the object at the current location in the gtt mappable and
         * fenceable? Used to avoid costly recalculations.
@@@ -1751,7 -1795,7 +1795,7 @@@ struct drm_i915_file_private 
        } mm;
        struct idr context_idr;
  
-       struct i915_ctx_hang_stats hang_stats;
+       struct i915_hw_context *private_default_ctx;
        atomic_t rps_wait_boost;
  };
  
  #define I915_NEED_GFX_HWS(dev)        (INTEL_INFO(dev)->need_gfx_hws)
  
  #define HAS_HW_CONTEXTS(dev)  (INTEL_INFO(dev)->gen >= 6)
- #define HAS_ALIASING_PPGTT(dev)       (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev))
+ #define HAS_ALIASING_PPGTT(dev)       (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
+ #define HAS_PPGTT(dev)                (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) \
+                                && !IS_BROADWELL(dev))
+ #define USES_PPGTT(dev)               intel_enable_ppgtt(dev, false)
+ #define USES_FULL_PPGTT(dev)  intel_enable_ppgtt(dev, true)
  
  #define HAS_OVERLAY(dev)              (INTEL_INFO(dev)->has_overlay)
  #define OVERLAY_NEEDS_PHYSICAL(dev)   (INTEL_INFO(dev)->overlay_needs_physical)
  
  /* Early gen2 have a totally busted CS tlb and require pinned batches. */
  #define HAS_BROKEN_CS_TLB(dev)                (IS_I830(dev) || IS_845G(dev))
 +/*
 + * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
 + * even when in MSI mode. This results in spurious interrupt warnings if the
 + * legacy irq no. is shared with another device. The kernel then disables that
 + * interrupt source and so prevents the other device from working properly.
 + */
 +#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
 +#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
  
  /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
   * rows, which changed the alignment requirements and fence programming.
  
  extern const struct drm_ioctl_desc i915_ioctls[];
  extern int i915_max_ioctl;
- extern unsigned int i915_fbpercrtc __always_unused;
- extern int i915_panel_ignore_lid __read_mostly;
- extern unsigned int i915_powersave __read_mostly;
- extern int i915_semaphores __read_mostly;
- extern unsigned int i915_lvds_downclock __read_mostly;
- extern int i915_lvds_channel_mode __read_mostly;
- extern int i915_panel_use_ssc __read_mostly;
- extern int i915_vbt_sdvo_panel_type __read_mostly;
- extern int i915_enable_rc6 __read_mostly;
- extern int i915_enable_fbc __read_mostly;
- extern bool i915_enable_hangcheck __read_mostly;
- extern int i915_enable_ppgtt __read_mostly;
- extern int i915_enable_psr __read_mostly;
- extern unsigned int i915_preliminary_hw_support __read_mostly;
- extern int i915_disable_power_well __read_mostly;
- extern int i915_enable_ips __read_mostly;
- extern bool i915_fastboot __read_mostly;
- extern int i915_enable_pc8 __read_mostly;
- extern int i915_pc8_timeout __read_mostly;
- extern bool i915_prefault_disable __read_mostly;
  
  extern int i915_suspend(struct drm_device *dev, pm_message_t state);
  extern int i915_resume(struct drm_device *dev);
  extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
  extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
  
+ /* i915_params.c */
+ struct i915_params {
+       int modeset;
+       int panel_ignore_lid;
+       unsigned int powersave;
+       int semaphores;
+       unsigned int lvds_downclock;
+       int lvds_channel_mode;
+       int panel_use_ssc;
+       int vbt_sdvo_panel_type;
+       int enable_rc6;
+       int enable_fbc;
+       bool enable_hangcheck;
+       int enable_ppgtt;
+       int enable_psr;
+       unsigned int preliminary_hw_support;
+       int disable_power_well;
+       int enable_ips;
+       bool fastboot;
+       int enable_pc8;
+       int pc8_timeout;
+       bool prefault_disable;
+       bool reset;
+       int invert_brightness;
+ };
+ extern struct i915_params i915 __read_mostly;
                                /* i915_dma.c */
  void i915_update_dri1_breadcrumb(struct drm_device *dev);
  extern void i915_kernel_lost_context(struct drm_device * dev);
@@@ -1945,6 -1992,8 +2000,8 @@@ extern void intel_console_resume(struc
  void i915_queue_hangcheck(struct drm_device *dev);
  void i915_handle_error(struct drm_device *dev, bool wedged);
  
+ void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
+                                                       int new_delay);
  extern void intel_irq_init(struct drm_device *dev);
  extern void intel_hpd_init(struct drm_device *dev);
  
@@@ -2014,6 -2063,8 +2071,8 @@@ void i915_gem_object_init(struct drm_i9
                         const struct drm_i915_gem_object_ops *ops);
  struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size);
+ void i915_init_vm(struct drm_i915_private *dev_priv,
+                 struct i915_address_space *vm);
  void i915_gem_free_object(struct drm_gem_object *obj);
  void i915_gem_vma_destroy(struct i915_vma *vma);
  
@@@ -2022,7 -2073,7 +2081,7 @@@ int __must_check i915_gem_object_pin(st
                                     uint32_t alignment,
                                     bool map_and_fenceable,
                                     bool nonblocking);
- void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+ void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
  int __must_check i915_vma_unbind(struct i915_vma *vma);
  int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
  int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
@@@ -2186,6 -2237,13 +2245,13 @@@ i915_gem_obj_lookup_or_create_vma(struc
                                  struct i915_address_space *vm);
  
  struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
+ static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
+       struct i915_vma *vma;
+       list_for_each_entry(vma, &obj->vma_list, vma_link)
+               if (vma->pin_count > 0)
+                       return true;
+       return false;
+ }
  
  /* Some GGTT VM helpers */
  #define obj_to_ggtt(obj) \
@@@ -2225,46 -2283,56 +2291,56 @@@ i915_gem_obj_ggtt_pin(struct drm_i915_g
  }
  
  /* i915_gem_context.c */
+ #define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
  int __must_check i915_gem_context_init(struct drm_device *dev);
  void i915_gem_context_fini(struct drm_device *dev);
+ void i915_gem_context_reset(struct drm_device *dev);
+ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
+ int i915_gem_context_enable(struct drm_i915_private *dev_priv);
  void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
  int i915_switch_context(struct intel_ring_buffer *ring,
-                       struct drm_file *file, int to_id);
+                       struct drm_file *file, struct i915_hw_context *to);
+ struct i915_hw_context *
+ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
  void i915_gem_context_free(struct kref *ctx_ref);
  static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
  {
-       kref_get(&ctx->ref);
+       if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
+               kref_get(&ctx->ref);
  }
  
  static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
  {
-       kref_put(&ctx->ref, i915_gem_context_free);
+       if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
+               kref_put(&ctx->ref, i915_gem_context_free);
+ }
+ static inline bool i915_gem_context_is_default(const struct i915_hw_context *c)
+ {
+       return c->id == DEFAULT_CONTEXT_ID;
  }
  
- struct i915_ctx_hang_stats * __must_check
- i915_gem_context_get_hang_stats(struct drm_device *dev,
-                               struct drm_file *file,
-                               u32 id);
  int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file);
  int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file);
  
- /* i915_gem_gtt.c */
- void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
- void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-                           struct drm_i915_gem_object *obj,
-                           enum i915_cache_level cache_level);
- void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-                             struct drm_i915_gem_object *obj);
+ /* i915_gem_evict.c */
+ int __must_check i915_gem_evict_something(struct drm_device *dev,
+                                         struct i915_address_space *vm,
+                                         int min_size,
+                                         unsigned alignment,
+                                         unsigned cache_level,
+                                         bool mappable,
+                                         bool nonblock);
+ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
+ int i915_gem_evict_everything(struct drm_device *dev);
  
+ /* i915_gem_gtt.c */
  void i915_check_and_clear_faults(struct drm_device *dev);
  void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
  void i915_gem_restore_gtt_mappings(struct drm_device *dev);
  int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
- void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
-                               enum i915_cache_level cache_level);
- void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
  void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
  void i915_gem_init_global_gtt(struct drm_device *dev);
  void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
@@@ -2275,18 -2343,64 +2351,64 @@@ static inline void i915_gem_chipset_flu
        if (INTEL_INFO(dev)->gen < 6)
                intel_gtt_chipset_flush();
  }
+ int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
+ static inline bool intel_enable_ppgtt(struct drm_device *dev, bool full)
+ {
+       if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+               return false;
  
+       if (i915.enable_ppgtt == 1 && full)
+               return false;
  
- /* i915_gem_evict.c */
- int __must_check i915_gem_evict_something(struct drm_device *dev,
-                                         struct i915_address_space *vm,
-                                         int min_size,
-                                         unsigned alignment,
-                                         unsigned cache_level,
-                                         bool mappable,
-                                         bool nonblock);
- int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
- int i915_gem_evict_everything(struct drm_device *dev);
+ #ifdef CONFIG_INTEL_IOMMU
+       /* Disable ppgtt on SNB if VT-d is on. */
+       if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
+               DRM_INFO("Disabling PPGTT because VT-d is on\n");
+               return false;
+       }
+ #endif
+       if (full)
+               return HAS_PPGTT(dev);
+       else
+               return HAS_ALIASING_PPGTT(dev);
+ }
+ static inline void ppgtt_release(struct kref *kref)
+ {
+       struct i915_hw_ppgtt *ppgtt = container_of(kref, struct i915_hw_ppgtt, ref);
+       struct drm_device *dev = ppgtt->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_address_space *vm = &ppgtt->base;
+       if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
+           (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
+               ppgtt->base.cleanup(&ppgtt->base);
+               return;
+       }
+       /*
+        * Make sure vmas are unbound before we take down the drm_mm
+        *
+        * FIXME: Proper refcounting should take care of this, this shouldn't be
+        * needed at all.
+        */
+       if (!list_empty(&vm->active_list)) {
+               struct i915_vma *vma;
+               list_for_each_entry(vma, &vm->active_list, mm_list)
+                       if (WARN_ON(list_empty(&vma->vma_link) ||
+                                   list_is_singular(&vma->vma_link)))
+                               break;
+               i915_gem_evict_vm(&ppgtt->base, true);
+       } else {
+               i915_gem_retire_requests(dev);
+               i915_gem_evict_vm(&ppgtt->base, false);
+       }
+       ppgtt->base.cleanup(&ppgtt->base);
+ }
  
  /* i915_gem_stolen.c */
  int i915_gem_init_stolen(struct drm_device *dev);
@@@ -2384,8 -2498,8 +2506,8 @@@ extern void intel_i2c_reset(struct drm_
  
  /* intel_opregion.c */
  struct intel_encoder;
 -extern int intel_opregion_setup(struct drm_device *dev);
  #ifdef CONFIG_ACPI
 +extern int intel_opregion_setup(struct drm_device *dev);
  extern void intel_opregion_init(struct drm_device *dev);
  extern void intel_opregion_fini(struct drm_device *dev);
  extern void intel_opregion_asle_intr(struct drm_device *dev);
@@@ -2394,7 -2508,6 +2516,7 @@@ extern int intel_opregion_notify_encode
  extern int intel_opregion_notify_adapter(struct drm_device *dev,
                                         pci_power_t state);
  #else
 +static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
  static inline void intel_opregion_init(struct drm_device *dev) { return; }
  static inline void intel_opregion_fini(struct drm_device *dev) { return; }
  static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
@@@ -2566,4 -2679,31 +2688,31 @@@ timespec_to_jiffies_timeout(const struc
        return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
  }
  
+ /*
+  * If you need to wait X milliseconds between events A and B, but event B
+  * doesn't happen exactly after event A, you record the timestamp (jiffies) of
+  * when event A happened, then just before event B you call this function and
+  * pass the timestamp as the first argument, and X as the second argument.
+  */
+ static inline void
+ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
+ {
+       unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
+       /*
+        * Don't re-read the value of "jiffies" every time since it may change
+        * behind our back and break the math.
+        */
+       tmp_jiffies = jiffies;
+       target_jiffies = timestamp_jiffies +
+                        msecs_to_jiffies_timeout(to_wait_ms);
+       if (time_after(target_jiffies, tmp_jiffies)) {
+               remaining_jiffies = target_jiffies - tmp_jiffies;
+               while (remaining_jiffies)
+                       remaining_jiffies =
+                           schedule_timeout_uninterruptible(remaining_jiffies);
+       }
+ }
  #endif
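
A minimal usage sketch for wait_remaining_ms_from_jiffies() above, loosely
modeled on the DP panel power sequencing work this merge pulls in; the structure
and field names here are hypothetical, not the actual intel_dp ones:

struct panel_example {
        unsigned long last_power_cycle; /* jiffies recorded at event A */
        int power_cycle_delay_ms;       /* required gap before event B */
};

static void panel_power_on_example(struct panel_example *p)
{
        /* Sleeps only for whatever part of the required delay remains. */
        wait_remaining_ms_from_jiffies(p->last_power_cycle,
                                       p->power_cycle_delay_ms);

        /* ... event B: actually power the panel back on ... */
}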
index 40a2b36b276baa774028b56ae60b6ae6c59e919d,6e858e17bb0ceb4f383e9f8361cc4102fb79496c..a4364ae1a2d66a62e3cd106f7c6d982c304f6315
@@@ -22,6 -22,7 +22,7 @@@
   *
   */
  
+ #include <linux/seq_file.h>
  #include <drm/drmP.h>
  #include <drm/i915_drm.h>
  #include "i915_drv.h"
@@@ -70,6 -71,12 +71,12 @@@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t
  #define PPAT_CACHED_INDEX             _PAGE_PAT /* WB LLCeLLC */
  #define PPAT_DISPLAY_ELLC_INDEX               _PAGE_PCD /* WT eLLC */
  
+ static void ppgtt_bind_vma(struct i915_vma *vma,
+                          enum i915_cache_level cache_level,
+                          u32 flags);
+ static void ppgtt_unbind_vma(struct i915_vma *vma);
+ static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
  static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
                                             enum i915_cache_level level,
                                             bool valid)
@@@ -199,12 -206,19 +206,19 @@@ static gen6_gtt_pte_t iris_pte_encode(d
  
  /* Broadwell Page Directory Pointer Descriptors */
  static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
-                          uint64_t val)
+                          uint64_t val, bool synchronous)
  {
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
        int ret;
  
        BUG_ON(entry >= 4);
  
+       if (synchronous) {
+               I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
+               I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
+               return 0;
+       }
        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;
        return 0;
  }
  
- static int gen8_ppgtt_enable(struct drm_device *dev)
+ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
+                         struct intel_ring_buffer *ring,
+                         bool synchronous)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
-       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-       int i, j, ret;
+       int i, ret;
  
        /* bit of a hack to find the actual last used pd */
        int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
  
-       for_each_ring(ring, dev_priv, j) {
-               I915_WRITE(RING_MODE_GEN7(ring),
-                          _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-       }
        for (i = used_pd - 1; i >= 0; i--) {
                dma_addr_t addr = ppgtt->pd_dma_addr[i];
-               for_each_ring(ring, dev_priv, j) {
-                       ret = gen8_write_pdp(ring, i, addr);
-                       if (ret)
-                               goto err_out;
-               }
+               ret = gen8_write_pdp(ring, i, addr, synchronous);
+               if (ret)
+                       return ret;
        }
-       return 0;
  
- err_out:
-       for_each_ring(ring, dev_priv, j)
-               I915_WRITE(RING_MODE_GEN7(ring),
-                          _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
-       return ret;
+       return 0;
  }
  
  static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
@@@ -324,6 -325,7 +325,7 @@@ static void gen8_ppgtt_cleanup(struct i
                container_of(vm, struct i915_hw_ppgtt, base);
        int i, j;
  
+       list_del(&vm->global_link);
        drm_mm_takedown(&vm->mm);
  
        for (i = 0; i < ppgtt->num_pd_pages ; i++) {
@@@ -386,6 -388,7 +388,7 @@@ static int gen8_ppgtt_init(struct i915_
        ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
        ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
        ppgtt->enable = gen8_ppgtt_enable;
+       ppgtt->switch_mm = gen8_mm_switch;
        ppgtt->base.clear_range = gen8_ppgtt_clear_range;
        ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
        ppgtt->base.cleanup = gen8_ppgtt_cleanup;
@@@ -458,6 -461,62 +461,62 @@@ err_out
        return ret;
  }
  
+ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
+ {
+       struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
+       struct i915_address_space *vm = &ppgtt->base;
+       gen6_gtt_pte_t __iomem *pd_addr;
+       gen6_gtt_pte_t scratch_pte;
+       uint32_t pd_entry;
+       int pte, pde;
+       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+       pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
+               ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+       seq_printf(m, "  VM %p (pd_offset %x-%x):\n", vm,
+                  ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
+       for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
+               u32 expected;
+               gen6_gtt_pte_t *pt_vaddr;
+               dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
+               pd_entry = readl(pd_addr + pde);
+               expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
+               if (pd_entry != expected)
+                       seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
+                                  pde,
+                                  pd_entry,
+                                  expected);
+               seq_printf(m, "\tPDE: %x\n", pd_entry);
+               pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
+               for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) {
+                       unsigned long va =
+                               (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
+                               (pte * PAGE_SIZE);
+                       int i;
+                       bool found = false;
+                       for (i = 0; i < 4; i++)
+                               if (pt_vaddr[pte + i] != scratch_pte)
+                                       found = true;
+                       if (!found)
+                               continue;
+                       seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
+                       for (i = 0; i < 4; i++) {
+                               if (pt_vaddr[pte + i] != scratch_pte)
+                                       seq_printf(m, " %08x", pt_vaddr[pte + i]);
+                               else
+                                       seq_puts(m, "  SCRATCH ");
+                       }
+                       seq_puts(m, "\n");
+               }
+               kunmap_atomic(pt_vaddr);
+       }
+ }
  static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
  {
        struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
        readl(pd_addr);
  }
  
- static int gen6_ppgtt_enable(struct drm_device *dev)
+ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
  {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       uint32_t pd_offset;
+       BUG_ON(ppgtt->pd_offset & 0x3f);
+       return (ppgtt->pd_offset / 64) << 16;
+ }
+ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
+                        struct intel_ring_buffer *ring,
+                        bool synchronous)
+ {
+       struct drm_device *dev = ppgtt->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+       /* If we're in reset, we can assume the GPU is sufficiently idle to
+        * manually frob these bits. Ideally we could use the ring functions,
+        * except our error handling makes it quite difficult (can't use
+        * intel_ring_begin, ring->flush, or intel_ring_advance)
+        *
+        * FIXME: We should try not to special case reset
+        */
+       if (synchronous ||
+           i915_reset_in_progress(&dev_priv->gpu_error)) {
+               WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
+               I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+               I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+               POSTING_READ(RING_PP_DIR_BASE(ring));
+               return 0;
+       }
+       /* NB: TLBs must be flushed and invalidated before a switch */
+       ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       if (ret)
+               return ret;
+       ret = intel_ring_begin(ring, 6);
+       if (ret)
+               return ret;
+       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+       intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+       intel_ring_emit(ring, PP_DIR_DCLV_2G);
+       intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+       intel_ring_emit(ring, get_pd_offset(ppgtt));
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
+       return 0;
+ }
+ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
+                         struct intel_ring_buffer *ring,
+                         bool synchronous)
+ {
+       struct drm_device *dev = ppgtt->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+       /* If we're in reset, we can assume the GPU is sufficiently idle to
+        * manually frob these bits. Ideally we could use the ring functions,
+        * except our error handling makes it quite difficult (can't use
+        * intel_ring_begin, ring->flush, or intel_ring_advance)
+        *
+        * FIXME: We should try not to special case reset
+        */
+       if (synchronous ||
+           i915_reset_in_progress(&dev_priv->gpu_error)) {
+               WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
+               I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+               I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+               POSTING_READ(RING_PP_DIR_BASE(ring));
+               return 0;
+       }
+       /* NB: TLBs must be flushed and invalidated before a switch */
+       ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       if (ret)
+               return ret;
+       ret = intel_ring_begin(ring, 6);
+       if (ret)
+               return ret;
+       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+       intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+       intel_ring_emit(ring, PP_DIR_DCLV_2G);
+       intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+       intel_ring_emit(ring, get_pd_offset(ppgtt));
+       intel_ring_emit(ring, MI_NOOP);
+       intel_ring_advance(ring);
+       /* XXX: RCS is the only one to auto invalidate the TLBs? */
+       if (ring->id != RCS) {
+               ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+ }
+ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
+                         struct intel_ring_buffer *ring,
+                         bool synchronous)
+ {
+       struct drm_device *dev = ppgtt->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       if (!synchronous)
+               return 0;
+       I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+       I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+       POSTING_READ(RING_PP_DIR_DCLV(ring));
+       return 0;
+ }
+ static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+ {
+       struct drm_device *dev = ppgtt->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
-       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-       int i;
+       int j, ret;
  
-       BUG_ON(ppgtt->pd_offset & 0x3f);
+       for_each_ring(ring, dev_priv, j) {
+               I915_WRITE(RING_MODE_GEN7(ring),
+                          _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
  
-       gen6_write_pdes(ppgtt);
+               /* We promise to do a switch later with FULL PPGTT. If this is
+                * aliasing, this is the one and only switch we'll do */
+               if (USES_FULL_PPGTT(dev))
+                       continue;
  
-       pd_offset = ppgtt->pd_offset;
-       pd_offset /= 64; /* in cachelines, */
-       pd_offset <<= 16;
+               ret = ppgtt->switch_mm(ppgtt, ring, true);
+               if (ret)
+                       goto err_out;
+       }
  
-       if (INTEL_INFO(dev)->gen == 6) {
-               uint32_t ecochk, gab_ctl, ecobits;
+       return 0;
  
-               ecobits = I915_READ(GAC_ECO_BITS);
-               I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
-                                        ECOBITS_PPGTT_CACHE64B);
+ err_out:
+       for_each_ring(ring, dev_priv, j)
+               I915_WRITE(RING_MODE_GEN7(ring),
+                          _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
+       return ret;
+ }
  
-               gab_ctl = I915_READ(GAB_CTL);
-               I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+ static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+ {
+       struct drm_device *dev = ppgtt->base.dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
+       uint32_t ecochk, ecobits;
+       int i;
  
-               ecochk = I915_READ(GAM_ECOCHK);
-               I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
-                                      ECOCHK_PPGTT_CACHE64B);
-               I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-       } else if (INTEL_INFO(dev)->gen >= 7) {
-               uint32_t ecochk, ecobits;
+       ecobits = I915_READ(GAC_ECO_BITS);
+       I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
  
-               ecobits = I915_READ(GAC_ECO_BITS);
-               I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+       ecochk = I915_READ(GAM_ECOCHK);
+       if (IS_HASWELL(dev)) {
+               ecochk |= ECOCHK_PPGTT_WB_HSW;
+       } else {
+               ecochk |= ECOCHK_PPGTT_LLC_IVB;
+               ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
+       }
+       I915_WRITE(GAM_ECOCHK, ecochk);
  
-               ecochk = I915_READ(GAM_ECOCHK);
-               if (IS_HASWELL(dev)) {
-                       ecochk |= ECOCHK_PPGTT_WB_HSW;
-               } else {
-                       ecochk |= ECOCHK_PPGTT_LLC_IVB;
-                       ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
-               }
-               I915_WRITE(GAM_ECOCHK, ecochk);
+       for_each_ring(ring, dev_priv, i) {
+               int ret;
                /* GFX_MODE is per-ring on gen7+ */
+               I915_WRITE(RING_MODE_GEN7(ring),
+                          _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+               /* We promise to do a switch later with FULL PPGTT. If this is
+                * aliasing, this is the one and only switch we'll do */
+               if (USES_FULL_PPGTT(dev))
+                       continue;
+               ret = ppgtt->switch_mm(ppgtt, ring, true);
+               if (ret)
+                       return ret;
        }
  
-       for_each_ring(ring, dev_priv, i) {
-               if (INTEL_INFO(dev)->gen >= 7)
-                       I915_WRITE(RING_MODE_GEN7(ring),
-                                  _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+       return 0;
+ }
  
-               I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-               I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
+ static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
+ {
+       struct drm_device *dev = ppgtt->base.dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
+       uint32_t ecochk, gab_ctl, ecobits;
+       int i;
+       ecobits = I915_READ(GAC_ECO_BITS);
+       I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
+                  ECOBITS_PPGTT_CACHE64B);
+       gab_ctl = I915_READ(GAB_CTL);
+       I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+       ecochk = I915_READ(GAM_ECOCHK);
+       I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
+       I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+       for_each_ring(ring, dev_priv, i) {
+               int ret = ppgtt->switch_mm(ppgtt, ring, true);
+               if (ret)
+                       return ret;
        }
        return 0;
  }
  
@@@ -608,7 -827,9 +827,9 @@@ static void gen6_ppgtt_cleanup(struct i
                container_of(vm, struct i915_hw_ppgtt, base);
        int i;
  
+       list_del(&vm->global_link);
        drm_mm_takedown(&ppgtt->base.mm);
+       drm_mm_remove_node(&ppgtt->node);
  
        if (ppgtt->pt_dma_addr) {
                for (i = 0; i < ppgtt->num_pd_entries; i++)
  
  static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
  {
+ #define GEN6_PD_ALIGN (PAGE_SIZE * 16)
+ #define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
        struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned first_pd_entry_in_global_pt;
-       int i;
-       int ret = -ENOMEM;
+       bool retried = false;
+       int i, ret;
  
-       /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
-        * entries. For aliasing ppgtt support we just steal them at the end for
-        * now. */
-       first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
+       /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
+        * allocator works in address space sizes, so it's multiplied by page
+        * size. We allocate at the top of the GTT to avoid fragmentation.
+        */
+       BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+ alloc:
+       ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
+                                                 &ppgtt->node, GEN6_PD_SIZE,
+                                                 GEN6_PD_ALIGN, 0,
+                                                 0, dev_priv->gtt.base.total,
+                                                 DRM_MM_SEARCH_DEFAULT);
+       if (ret == -ENOSPC && !retried) {
+               ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
+                                              GEN6_PD_SIZE, GEN6_PD_ALIGN,
+                                              I915_CACHE_NONE, false, true);
+               if (ret)
+                       return ret;
+               retried = true;
+               goto alloc;
+       }
+       if (ppgtt->node.start < dev_priv->gtt.mappable_end)
+               DRM_DEBUG("Forced to use aperture for PDEs\n");
  
        ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
        ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
-       ppgtt->enable = gen6_ppgtt_enable;
+       if (IS_GEN6(dev)) {
+               ppgtt->enable = gen6_ppgtt_enable;
+               ppgtt->switch_mm = gen6_mm_switch;
+       } else if (IS_HASWELL(dev)) {
+               ppgtt->enable = gen7_ppgtt_enable;
+               ppgtt->switch_mm = hsw_mm_switch;
+       } else if (IS_GEN7(dev)) {
+               ppgtt->enable = gen7_ppgtt_enable;
+               ppgtt->switch_mm = gen7_mm_switch;
+       } else
+               BUG();
        ppgtt->base.clear_range = gen6_ppgtt_clear_range;
        ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
        ppgtt->base.cleanup = gen6_ppgtt_cleanup;
        ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
        ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
                                  GFP_KERNEL);
-       if (!ppgtt->pt_pages)
+       if (!ppgtt->pt_pages) {
+               drm_mm_remove_node(&ppgtt->node);
                return -ENOMEM;
+       }
  
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
  
        ppgtt->base.clear_range(&ppgtt->base, 0,
                                ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
+       ppgtt->debug_dump = gen6_dump_ppgtt;
  
-       ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
+       DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
+                        ppgtt->node.size >> 20,
+                        ppgtt->node.start / PAGE_SIZE);
+       ppgtt->pd_offset =
+               ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
  
        return 0;
  
@@@ -696,19 -955,15 +955,15 @@@ err_pt_alloc
                        __free_page(ppgtt->pt_pages[i]);
        }
        kfree(ppgtt->pt_pages);
+       drm_mm_remove_node(&ppgtt->node);
  
        return ret;
  }
  
- static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+ int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_hw_ppgtt *ppgtt;
-       int ret;
-       ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
-       if (!ppgtt)
-               return -ENOMEM;
+       int ret = 0;
  
        ppgtt->base.dev = dev;
  
        else
                BUG();
  
-       if (ret)
-               kfree(ppgtt);
-       else {
-               dev_priv->mm.aliasing_ppgtt = ppgtt;
+       if (!ret) {
+               struct drm_i915_private *dev_priv = dev->dev_private;
+               kref_init(&ppgtt->ref);
                drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
                            ppgtt->base.total);
+               i915_init_vm(dev_priv, &ppgtt->base);
+               if (INTEL_INFO(dev)->gen < 8) {
+                       gen6_write_pdes(ppgtt);
+                       DRM_DEBUG("Adding PPGTT at offset %x\n",
+                                 ppgtt->pd_offset << 10);
+               }
        }
  
        return ret;
  }
  
- void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+ static void
+ ppgtt_bind_vma(struct i915_vma *vma,
+              enum i915_cache_level cache_level,
+              u32 flags)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+       const unsigned long entry = vma->node.start >> PAGE_SHIFT;
  
-       if (!ppgtt)
-               return;
+       WARN_ON(flags);
  
-       ppgtt->base.cleanup(&ppgtt->base);
-       dev_priv->mm.aliasing_ppgtt = NULL;
+       vma->vm->insert_entries(vma->vm, vma->obj->pages, entry, cache_level);
  }
  
- void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-                           struct drm_i915_gem_object *obj,
-                           enum i915_cache_level cache_level)
+ static void ppgtt_unbind_vma(struct i915_vma *vma)
  {
-       ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
-                                  i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
-                                  cache_level);
- }
+       const unsigned long entry = vma->node.start >> PAGE_SHIFT;
  
- void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-                             struct drm_i915_gem_object *obj)
- {
-       ppgtt->base.clear_range(&ppgtt->base,
-                               i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
-                               obj->base.size >> PAGE_SHIFT,
-                               true);
+       vma->vm->clear_range(vma->vm,
+                            entry,
+                            vma->obj->base.size >> PAGE_SHIFT,
+                            true);
  }
  
  extern int intel_iommu_gfx_mapped;
@@@ -849,6 -1101,7 +1101,7 @@@ void i915_gem_restore_gtt_mappings(stru
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
+       struct i915_address_space *vm;
  
        i915_check_and_clear_faults(dev);
  
                                       true);
  
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+               struct i915_vma *vma = i915_gem_obj_to_vma(obj,
+                                                          &dev_priv->gtt.base);
+               if (!vma)
+                       continue;
                i915_gem_clflush_object(obj, obj->pin_display);
-               i915_gem_gtt_bind_object(obj, obj->cache_level);
+               /* The bind_vma code tries to be smart about tracking mappings.
+                * Unfortunately above, we've just wiped out the mappings
+                * without telling our object about it. So we need to fake it.
+                */
+               obj->has_global_gtt_mapping = 0;
+               vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+       }
+       if (INTEL_INFO(dev)->gen >= 8)
+               return;
+       list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+               /* TODO: Perhaps it shouldn't be gen6 specific */
+               if (i915_is_ggtt(vm)) {
+                       if (dev_priv->mm.aliasing_ppgtt)
+                               gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
+                       continue;
+               }
+               gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
        }
  
        i915_gem_chipset_flush(dev);
@@@ -1017,16 -1295,18 +1295,18 @@@ static void gen6_ggtt_clear_range(struc
        readl(gtt_base);
  }
  
- static void i915_ggtt_insert_entries(struct i915_address_space *vm,
-                                    struct sg_table *st,
-                                    unsigned int pg_start,
-                                    enum i915_cache_level cache_level)
+ static void i915_ggtt_bind_vma(struct i915_vma *vma,
+                              enum i915_cache_level cache_level,
+                              u32 unused)
  {
+       const unsigned long entry = vma->node.start >> PAGE_SHIFT;
        unsigned int flags = (cache_level == I915_CACHE_NONE) ?
                AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
  
-       intel_gtt_insert_sg_entries(st, pg_start, flags);
+       BUG_ON(!i915_is_ggtt(vma->vm));
+       intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
+       vma->obj->has_global_gtt_mapping = 1;
  }
  
  static void i915_ggtt_clear_range(struct i915_address_space *vm,
        intel_gtt_clear_range(first_entry, num_entries);
  }
  
+ static void i915_ggtt_unbind_vma(struct i915_vma *vma)
+ {
+       const unsigned int first = vma->node.start >> PAGE_SHIFT;
+       const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
+       BUG_ON(!i915_is_ggtt(vma->vm));
+       vma->obj->has_global_gtt_mapping = 0;
+       intel_gtt_clear_range(first, size);
+ }
  
- void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
-                             enum i915_cache_level cache_level)
+ static void ggtt_bind_vma(struct i915_vma *vma,
+                         enum i915_cache_level cache_level,
+                         u32 flags)
  {
-       struct drm_device *dev = obj->base.dev;
+       struct drm_device *dev = vma->vm->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
+       struct drm_i915_gem_object *obj = vma->obj;
+       const unsigned long entry = vma->node.start >> PAGE_SHIFT;
  
-       dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
-                                         entry,
-                                         cache_level);
+       /* If there is no aliasing PPGTT, or the caller needs a global mapping,
+        * or we have a global mapping already but the cacheability flags have
+        * changed, set the global PTEs.
+        *
+        * If there is an aliasing PPGTT it is anecdotally faster, so use that
+        * instead if none of the above hold true.
+        *
+        * NB: A global mapping should only be needed for special regions like
+        * "gtt mappable", SNB errata, or if specified via special execbuf
+        * flags. At all other times, the GPU will use the aliasing PPGTT.
+        */
+       if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
+               if (!obj->has_global_gtt_mapping ||
+                   (cache_level != obj->cache_level)) {
+                       vma->vm->insert_entries(vma->vm, obj->pages, entry,
+                                               cache_level);
+                       obj->has_global_gtt_mapping = 1;
+               }
+       }
  
-       obj->has_global_gtt_mapping = 1;
+       if (dev_priv->mm.aliasing_ppgtt &&
+           (!obj->has_aliasing_ppgtt_mapping ||
+            (cache_level != obj->cache_level))) {
+               struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
+               appgtt->base.insert_entries(&appgtt->base,
+                                           vma->obj->pages, entry, cache_level);
+               vma->obj->has_aliasing_ppgtt_mapping = 1;
+       }
  }
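
The comment block in ggtt_bind_vma() above encodes a small decision table: write the global PTEs only when there is no aliasing PPGTT, or the caller demanded GLOBAL_BIND, or an existing global mapping has stale cacheability. A sketch of just that predicate, under the assumption that the simplified fields below stand in for the driver's object state:

#include <stdbool.h>
#include <stdio.h>

#define GLOBAL_BIND (1 << 0)

struct obj_state {
	bool has_global_gtt_mapping;
	int cache_level;
};

static bool needs_global_ptes(bool has_aliasing_ppgtt, unsigned int flags,
			      const struct obj_state *obj, int new_level)
{
	if (!has_aliasing_ppgtt || (flags & GLOBAL_BIND))
		return !obj->has_global_gtt_mapping ||
		       obj->cache_level != new_level;
	return false;	/* the aliasing PPGTT will carry the mapping */
}

int main(void)
{
	struct obj_state o = { .has_global_gtt_mapping = true, .cache_level = 0 };

	/* GLOBAL_BIND with a changed cache level: rewrite the global PTEs. */
	printf("%d\n", needs_global_ptes(true, GLOBAL_BIND, &o, 1)); /* 1 */
	/* Aliasing PPGTT present, no GLOBAL_BIND: skip the global GTT. */
	printf("%d\n", needs_global_ptes(true, 0, &o, 0));           /* 0 */
	return 0;
}
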
  
- void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
+ static void ggtt_unbind_vma(struct i915_vma *vma)
  {
-       struct drm_device *dev = obj->base.dev;
+       struct drm_device *dev = vma->vm->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
-       dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
-                                      entry,
-                                      obj->base.size >> PAGE_SHIFT,
-                                      true);
+       struct drm_i915_gem_object *obj = vma->obj;
+       const unsigned long entry = vma->node.start >> PAGE_SHIFT;
+       if (obj->has_global_gtt_mapping) {
+               vma->vm->clear_range(vma->vm, entry,
+                                    vma->obj->base.size >> PAGE_SHIFT,
+                                    true);
+               obj->has_global_gtt_mapping = 0;
+       }
  
-       obj->has_global_gtt_mapping = 0;
+       if (obj->has_aliasing_ppgtt_mapping) {
+               struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
+               appgtt->base.clear_range(&appgtt->base,
+                                        entry,
+                                        obj->base.size >> PAGE_SHIFT,
+                                        true);
+               obj->has_aliasing_ppgtt_mapping = 0;
+       }
  }
  
  void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
@@@ -1155,21 -1479,6 +1479,6 @@@ void i915_gem_setup_global_gtt(struct d
        ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
  }
  
- static bool
- intel_enable_ppgtt(struct drm_device *dev)
- {
-       if (i915_enable_ppgtt >= 0)
-               return i915_enable_ppgtt;
- #ifdef CONFIG_INTEL_IOMMU
-       /* Disable ppgtt on SNB if VT-d is on. */
-       if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
-               return false;
- #endif
-       return true;
- }
  void i915_gem_init_global_gtt(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        gtt_size = dev_priv->gtt.base.total;
        mappable_size = dev_priv->gtt.mappable_end;
  
-       if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
-               int ret;
-               if (INTEL_INFO(dev)->gen <= 7) {
-                       /* PPGTT pdes are stolen from global gtt ptes, so shrink the
-                        * aperture accordingly when using aliasing ppgtt. */
-                       gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
-               }
-               i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
-               ret = i915_gem_init_aliasing_ppgtt(dev);
-               if (!ret)
-                       return;
-               DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
-               drm_mm_takedown(&dev_priv->gtt.base.mm);
-               if (INTEL_INFO(dev)->gen < 8)
-                       gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE;
-       }
        i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
  }
  
@@@ -1253,7 -1542,7 +1542,7 @@@ static inline unsigned int gen8_get_tot
        if (bdw_gmch_ctl)
                bdw_gmch_ctl = 1 << bdw_gmch_ctl;
        if (bdw_gmch_ctl > 4) {
-               WARN_ON(!i915_preliminary_hw_support);
+               WARN_ON(!i915.preliminary_hw_support);
                return 4<<20;
        }
  
@@@ -1278,14 -1567,14 +1567,14 @@@ static int ggtt_probe_common(struct drm
                             size_t gtt_size)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      phys_addr_t gtt_bus_addr;
 +      phys_addr_t gtt_phys_addr;
        int ret;
  
        /* For Modern GENs the PTEs and register space are split in the BAR */
 -      gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
 +      gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
                (pci_resource_len(dev->pdev, 0) / 2);
  
 -      dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
 +      dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
        if (!dev_priv->gtt.gsm) {
                DRM_ERROR("Failed to map the gtt page table\n");
                return -ENOMEM;
@@@ -1438,7 -1727,6 +1727,6 @@@ static int i915_gmch_probe(struct drm_d
  
        dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
        dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
-       dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
  
        if (unlikely(dev_priv->gtt.do_idle_maps))
                DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@@ -1493,3 -1781,62 +1781,62 @@@ int i915_gem_gtt_init(struct drm_devic
  
        return 0;
  }
+ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+                                             struct i915_address_space *vm)
+ {
+       struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+       if (vma == NULL)
+               return ERR_PTR(-ENOMEM);
+       INIT_LIST_HEAD(&vma->vma_link);
+       INIT_LIST_HEAD(&vma->mm_list);
+       INIT_LIST_HEAD(&vma->exec_list);
+       vma->vm = vm;
+       vma->obj = obj;
+       switch (INTEL_INFO(vm->dev)->gen) {
+       case 8:
+       case 7:
+       case 6:
+               if (i915_is_ggtt(vm)) {
+                       vma->unbind_vma = ggtt_unbind_vma;
+                       vma->bind_vma = ggtt_bind_vma;
+               } else {
+                       vma->unbind_vma = ppgtt_unbind_vma;
+                       vma->bind_vma = ppgtt_bind_vma;
+               }
+               break;
+       case 5:
+       case 4:
+       case 3:
+       case 2:
+               BUG_ON(!i915_is_ggtt(vm));
+               vma->unbind_vma = i915_ggtt_unbind_vma;
+               vma->bind_vma = i915_ggtt_bind_vma;
+               break;
+       default:
+               BUG();
+       }
+       /* Keep GGTT vmas first to make debug easier */
+       if (i915_is_ggtt(vm))
+               list_add(&vma->vma_link, &obj->vma_list);
+       else
+               list_add_tail(&vma->vma_link, &obj->vma_list);
+       return vma;
+ }
+ struct i915_vma *
+ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+                                 struct i915_address_space *vm)
+ {
+       struct i915_vma *vma;
+       vma = i915_gem_obj_to_vma(obj, vm);
+       if (!vma)
+               vma = __i915_gem_vma_create(obj, vm);
+       return vma;
+ }
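
i915_gem_obj_lookup_or_create_vma() above is the usual lookup-or-create idiom, with one wrinkle: GGTT vmas are added at the head of the object's list so they are found first when debugging. A miniature user-space rendition, with types and names invented for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct vma {
	const char *vm;		/* which address space the mapping belongs to */
	struct vma *next;
};

static struct vma *lookup(struct vma *head, const char *vm)
{
	for (; head; head = head->next)
		if (strcmp(head->vm, vm) == 0)
			return head;
	return NULL;
}

static struct vma *lookup_or_create(struct vma **head, const char *vm)
{
	struct vma *v = lookup(*head, vm);

	if (!v) {
		v = calloc(1, sizeof(*v));
		if (!v)
			return NULL;	/* the driver returns ERR_PTR(-ENOMEM) */
		v->vm = vm;
		v->next = *head;	/* newest at the head, as GGTT vmas are */
		*head = v;
	}
	return v;
}

int main(void)
{
	struct vma *list = NULL;
	struct vma *a = lookup_or_create(&list, "ggtt");
	struct vma *b = lookup_or_create(&list, "ggtt");

	printf("same vma: %d\n", a == b);	/* 1: second call is a lookup */
	free(a);
	return 0;
}
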
index 990cf8f43efda908ecb1565cb5e8a786efe68306,dc47bb9742d2cea50a6e728cc97ad277fc49a7f2..000b3694f349a31bbed6c32e4e46f20fa848d6c0
@@@ -146,10 -146,7 +146,10 @@@ static void i915_error_vprintf(struct d
                va_list tmp;
  
                va_copy(tmp, args);
 -              if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
 +              len = vsnprintf(NULL, 0, f, tmp);
 +              va_end(tmp);
 +
 +              if (!__i915_error_seek(e, len))
                        return;
        }
  
@@@ -238,50 -235,61 +238,61 @@@ static const char *hangcheck_action_to_
  
  static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
                                  struct drm_device *dev,
-                                 struct drm_i915_error_state *error,
-                                 unsigned ring)
+                                 struct drm_i915_error_ring *ring)
  {
-       BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
-       if (!error->ring[ring].valid)
+       if (!ring->valid)
                return;
  
-       err_printf(m, "%s command stream:\n", ring_str(ring));
-       err_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
-       err_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
-       err_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
-       err_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
-       err_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
-       err_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
-       err_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
+       err_printf(m, "  HEAD: 0x%08x\n", ring->head);
+       err_printf(m, "  TAIL: 0x%08x\n", ring->tail);
+       err_printf(m, "  CTL: 0x%08x\n", ring->ctl);
+       err_printf(m, "  HWS: 0x%08x\n", ring->hws);
+       err_printf(m, "  ACTHD: 0x%08x\n", ring->acthd);
+       err_printf(m, "  IPEIR: 0x%08x\n", ring->ipeir);
+       err_printf(m, "  IPEHR: 0x%08x\n", ring->ipehr);
+       err_printf(m, "  INSTDONE: 0x%08x\n", ring->instdone);
        if (INTEL_INFO(dev)->gen >= 4) {
-               err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr[ring]);
-               err_printf(m, "  BB_STATE: 0x%08x\n", error->bbstate[ring]);
-               err_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
+               err_printf(m, "  BBADDR: 0x%08llx\n", ring->bbaddr);
+               err_printf(m, "  BB_STATE: 0x%08x\n", ring->bbstate);
+               err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
        }
-       err_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
-       err_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
+       err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
+       err_printf(m, "  FADDR: 0x%08x\n", ring->faddr);
        if (INTEL_INFO(dev)->gen >= 6) {
-               err_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
-               err_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+               err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
+               err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
                err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
-                          error->semaphore_mboxes[ring][0],
-                          error->semaphore_seqno[ring][0]);
+                          ring->semaphore_mboxes[0],
+                          ring->semaphore_seqno[0]);
                err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
-                          error->semaphore_mboxes[ring][1],
-                          error->semaphore_seqno[ring][1]);
+                          ring->semaphore_mboxes[1],
+                          ring->semaphore_seqno[1]);
                if (HAS_VEBOX(dev)) {
                        err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
-                                  error->semaphore_mboxes[ring][2],
-                                  error->semaphore_seqno[ring][2]);
+                                  ring->semaphore_mboxes[2],
+                                  ring->semaphore_seqno[2]);
                }
        }
-       err_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
-       err_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
-       err_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
-       err_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+       if (USES_PPGTT(dev)) {
+               err_printf(m, "  GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);
+               if (INTEL_INFO(dev)->gen >= 8) {
+                       int i;
+                       for (i = 0; i < 4; i++)
+                               err_printf(m, "  PDP%d: 0x%016llx\n",
+                                          i, ring->vm_info.pdp[i]);
+               } else {
+                       err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
+                                  ring->vm_info.pp_dir_base);
+               }
+       }
+       err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
+       err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
+       err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
+       err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
        err_printf(m, "  hangcheck: %s [%d]\n",
-                  hangcheck_action_to_str(error->hangcheck_action[ring]),
-                  error->hangcheck_score[ring]);
+                  hangcheck_action_to_str(ring->hangcheck_action),
+                  ring->hangcheck_score);
  }
  
  void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@@ -333,8 -341,10 +344,10 @@@ int i915_error_state_to_str(struct drm_
        if (INTEL_INFO(dev)->gen == 7)
                err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
  
-       for (i = 0; i < ARRAY_SIZE(error->ring); i++)
-               i915_ring_error_state(m, dev, error, i);
+       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+               err_printf(m, "%s command stream:\n", ring_str(i));
+               i915_ring_error_state(m, dev, &error->ring[i]);
+       }
  
        if (error->active_bo)
                print_error_buffers(m, "Active",
                        }
                }
  
+               if ((obj = error->ring[i].hws_page)) {
+                       err_printf(m, "%s --- HW Status = 0x%08x\n",
+                                  dev_priv->ring[i].name,
+                                  obj->gtt_offset);
+                       offset = 0;
+                       for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+                               err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+                                          offset,
+                                          obj->pages[0][elt],
+                                          obj->pages[0][elt+1],
+                                          obj->pages[0][elt+2],
+                                          obj->pages[0][elt+3]);
+                               offset += 16;
+                       }
+               }
                if ((obj = error->ring[i].ctx)) {
                        err_printf(m, "%s --- HW Context = 0x%08x\n",
                                   dev_priv->ring[i].name,
@@@ -472,6 -498,7 +501,7 @@@ static void i915_error_state_free(struc
        for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
                i915_error_object_free(error->ring[i].batchbuffer);
                i915_error_object_free(error->ring[i].ringbuffer);
+               i915_error_object_free(error->ring[i].hws_page);
                i915_error_object_free(error->ring[i].ctx);
                kfree(error->ring[i].requests);
        }
  static struct drm_i915_error_object *
  i915_error_object_create_sized(struct drm_i915_private *dev_priv,
                               struct drm_i915_gem_object *src,
+                              struct i915_address_space *vm,
                               const int num_pages)
  {
        struct drm_i915_error_object *dst;
        if (dst == NULL)
                return NULL;
  
-       reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
+       reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm);
        for (i = 0; i < num_pages; i++) {
                unsigned long flags;
                void *d;
                        goto unwind;
  
                local_irq_save(flags);
-               if (reloc_offset < dev_priv->gtt.mappable_end &&
-                   src->has_global_gtt_mapping) {
+               if (src->cache_level == I915_CACHE_NONE &&
+                   reloc_offset < dev_priv->gtt.mappable_end &&
+                   src->has_global_gtt_mapping &&
+                   i915_is_ggtt(vm)) {
                        void __iomem *s;
  
                        /* Simply ignore tiling or any overlapping fence.
@@@ -559,8 -589,12 +592,12 @@@ unwind
        kfree(dst);
        return NULL;
  }
- #define i915_error_object_create(dev_priv, src) \
-       i915_error_object_create_sized((dev_priv), (src), \
+ #define i915_error_object_create(dev_priv, src, vm) \
+       i915_error_object_create_sized((dev_priv), (src), (vm), \
+                                      (src)->base.size>>PAGE_SHIFT)
+ #define i915_error_ggtt_object_create(dev_priv, src) \
+       i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \
                                       (src)->base.size>>PAGE_SHIFT)
  
  static void capture_bo(struct drm_i915_error_buffer *err,
        err->write_domain = obj->base.write_domain;
        err->fence_reg = obj->fence_reg;
        err->pinned = 0;
-       if (obj->pin_count > 0)
+       if (i915_gem_obj_is_pinned(obj))
                err->pinned = 1;
        if (obj->user_pin_count > 0)
                err->pinned = -1;
@@@ -608,7 -642,7 +645,7 @@@ static u32 capture_pinned_bo(struct drm
        int i = 0;
  
        list_for_each_entry(obj, head, global_list) {
-               if (obj->pin_count == 0)
+               if (!i915_gem_obj_is_pinned(obj))
                        continue;
  
                capture_bo(err++, obj);
        return i;
  }
  
+ /* Generate a semi-unique error code. The code is not meant to have meaning; the
+  * code's only purpose is to try to prevent false duplicated bug reports by
+  * grossly estimating a GPU error state.
+  *
+  * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
+  * the hang if we could strip the GTT offset information from it.
+  *
+  * It's only a small step better than a random number in its current form.
+  */
+ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
+                                        struct drm_i915_error_state *error)
+ {
+       uint32_t error_code = 0;
+       int i;
+       /* IPEHR would be an ideal way to detect errors, as it's the gross
+        * measure of "the command that hung." However, it also captures some
+        * very common synchronization commands which almost always appear
+        * when the hang is strictly a client bug. Use instdone to help
+        * differentiate those cases.
+        */
+       for (i = 0; i < I915_NUM_RINGS; i++)
+               if (error->ring[i].hangcheck_action == HANGCHECK_HUNG)
+                       return error->ring[i].ipehr ^ error->ring[i].instdone;
+       return error_code;
+ }
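
The hang code produced above is nothing more than IPEHR XOR INSTDONE of the first ring that hangcheck declared hung. A worked example with made-up register values (the real ones come out of the hardware dump):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t ipehr    = 0x0b160001;	/* hypothetical "command that hung" */
	uint32_t instdone = 0xffe7fffe;	/* hypothetical unit-done bits */

	/* Prints "GPU HANG [f4f1ffff]". */
	printf("GPU HANG [%" PRIx32 "]\n", ipehr ^ instdone);
	return 0;
}
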
  static void i915_gem_record_fences(struct drm_device *dev,
                                   struct drm_i915_error_state *error)
  {
        }
  }
  
+ /* This assumes all batchbuffers are executed from the PPGTT. It might have to
+  * change in the future. */
+ static bool is_active_vm(struct i915_address_space *vm,
+                        struct intel_ring_buffer *ring)
+ {
+       struct drm_device *dev = vm->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_hw_ppgtt *ppgtt;
+       if (INTEL_INFO(dev)->gen < 7)
+               return i915_is_ggtt(vm);
+       /* FIXME: This ignores that the global gtt vm is also on this list. */
+       ppgtt = container_of(vm, struct i915_hw_ppgtt, base);
+       if (INTEL_INFO(dev)->gen >= 8) {
+               u64 pdp0 = (u64)I915_READ(GEN8_RING_PDP_UDW(ring, 0)) << 32;
+               pdp0 |=  I915_READ(GEN8_RING_PDP_LDW(ring, 0));
+               return pdp0 == ppgtt->pd_dma_addr[0];
+       } else {
+               u32 pp_db;
+               pp_db = I915_READ(RING_PP_DIR_BASE(ring));
+               return (pp_db >> 10) == ppgtt->pd_offset;
+       }
+ }
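
On gen8, is_active_vm() above has to reassemble a 64-bit page-directory pointer from the upper and lower 32-bit register halves before comparing it against the PPGTT's DMA address. The shift-and-or itself, with hypothetical values:

#include <inttypes.h>
#include <stdio.h>

static uint64_t combine_pdp(uint32_t udw, uint32_t ldw)
{
	return ((uint64_t)udw << 32) | ldw;	/* UDW is the high half */
}

int main(void)
{
	/* Prints 0x123450000. */
	printf("%#" PRIx64 "\n", combine_pdp(0x1, 0x23450000));
	return 0;
}
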
  static struct drm_i915_error_object *
  i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
                             struct intel_ring_buffer *ring)
        struct i915_address_space *vm;
        struct i915_vma *vma;
        struct drm_i915_gem_object *obj;
+       bool found_active = false;
        u32 seqno;
  
        if (!ring->get_seqno)
                if (obj != NULL &&
                    acthd >= i915_gem_obj_ggtt_offset(obj) &&
                    acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
-                       return i915_error_object_create(dev_priv, obj);
+                       return i915_error_ggtt_object_create(dev_priv, obj);
        }
  
        seqno = ring->get_seqno(ring, false);
        list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+               if (!is_active_vm(vm, ring))
+                       continue;
+               found_active = true;
                list_for_each_entry(vma, &vm->active_list, mm_list) {
                        obj = vma->obj;
                        if (obj->ring != ring)
                        /* We need to copy these to an anonymous buffer as the simplest
                         * method to avoid being overwritten by userspace.
                         */
-                       return i915_error_object_create(dev_priv, obj);
+                       return i915_error_object_create(dev_priv, obj, vm);
                }
        }
  
+       WARN_ON(!found_active);
        return NULL;
  }
  
  static void i915_record_ring_state(struct drm_device *dev,
-                                  struct drm_i915_error_state *error,
-                                  struct intel_ring_buffer *ring)
+                                  struct intel_ring_buffer *ring,
+                                  struct drm_i915_error_ring *ering)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        if (INTEL_INFO(dev)->gen >= 6) {
-               error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
-               error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
-               error->semaphore_mboxes[ring->id][0]
+               ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
+               ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
+               ering->semaphore_mboxes[0]
                        = I915_READ(RING_SYNC_0(ring->mmio_base));
-               error->semaphore_mboxes[ring->id][1]
+               ering->semaphore_mboxes[1]
                        = I915_READ(RING_SYNC_1(ring->mmio_base));
-               error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
-               error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
+               ering->semaphore_seqno[0] = ring->sync_seqno[0];
+               ering->semaphore_seqno[1] = ring->sync_seqno[1];
        }
  
        if (HAS_VEBOX(dev)) {
-               error->semaphore_mboxes[ring->id][2] =
+               ering->semaphore_mboxes[2] =
                        I915_READ(RING_SYNC_2(ring->mmio_base));
-               error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2];
+               ering->semaphore_seqno[2] = ring->sync_seqno[2];
        }
  
        if (INTEL_INFO(dev)->gen >= 4) {
-               error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
-               error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
-               error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
-               error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
-               error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
-               error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base));
+               ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
+               ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
+               ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
+               ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
+               ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
+               ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
                if (INTEL_INFO(dev)->gen >= 8)
-                       error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
-               error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base));
+                       ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
+               ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
        } else {
-               error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
-               error->ipeir[ring->id] = I915_READ(IPEIR);
-               error->ipehr[ring->id] = I915_READ(IPEHR);
-               error->instdone[ring->id] = I915_READ(INSTDONE);
+               ering->faddr = I915_READ(DMA_FADD_I8XX);
+               ering->ipeir = I915_READ(IPEIR);
+               ering->ipehr = I915_READ(IPEHR);
+               ering->instdone = I915_READ(INSTDONE);
        }
  
-       error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
-       error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
-       error->seqno[ring->id] = ring->get_seqno(ring, false);
-       error->acthd[ring->id] = intel_ring_get_active_head(ring);
-       error->head[ring->id] = I915_READ_HEAD(ring);
-       error->tail[ring->id] = I915_READ_TAIL(ring);
-       error->ctl[ring->id] = I915_READ_CTL(ring);
+       ering->waiting = waitqueue_active(&ring->irq_queue);
+       ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
+       ering->seqno = ring->get_seqno(ring, false);
+       ering->acthd = intel_ring_get_active_head(ring);
+       ering->head = I915_READ_HEAD(ring);
+       ering->tail = I915_READ_TAIL(ring);
+       ering->ctl = I915_READ_CTL(ring);
+       if (I915_NEED_GFX_HWS(dev)) {
+               int mmio;
+               if (IS_GEN7(dev)) {
+                       switch (ring->id) {
+                       default:
+                       case RCS:
+                               mmio = RENDER_HWS_PGA_GEN7;
+                               break;
+                       case BCS:
+                               mmio = BLT_HWS_PGA_GEN7;
+                               break;
+                       case VCS:
+                               mmio = BSD_HWS_PGA_GEN7;
+                               break;
+                       case VECS:
+                               mmio = VEBOX_HWS_PGA_GEN7;
+                               break;
+                       }
+               } else if (IS_GEN6(ring->dev)) {
+                       mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+               } else {
+                       /* XXX: gen8 returns to sanity */
+                       mmio = RING_HWS_PGA(ring->mmio_base);
+               }
+               ering->hws = I915_READ(mmio);
+       }
+       ering->cpu_ring_head = ring->head;
+       ering->cpu_ring_tail = ring->tail;
+       ering->hangcheck_score = ring->hangcheck.score;
+       ering->hangcheck_action = ring->hangcheck.action;
+       if (USES_PPGTT(dev)) {
+               int i;
  
-       error->cpu_ring_head[ring->id] = ring->head;
-       error->cpu_ring_tail[ring->id] = ring->tail;
+               ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
  
-       error->hangcheck_score[ring->id] = ring->hangcheck.score;
-       error->hangcheck_action[ring->id] = ring->hangcheck.action;
+               switch (INTEL_INFO(dev)->gen) {
+               case 8:
+                       for (i = 0; i < 4; i++) {
+                               ering->vm_info.pdp[i] =
+                                       I915_READ(GEN8_RING_PDP_UDW(ring, i));
+                               ering->vm_info.pdp[i] <<= 32;
+                               ering->vm_info.pdp[i] |=
+                                       I915_READ(GEN8_RING_PDP_LDW(ring, i));
+                       }
+                       break;
+               case 7:
+                       ering->vm_info.pp_dir_base =
+                               I915_READ(RING_PP_DIR_BASE(ring));
+                       break;
+               case 6:
+                       ering->vm_info.pp_dir_base =
+                               I915_READ(RING_PP_DIR_BASE_READ(ring));
+                       break;
+               }
+       }
  }
  
  
@@@ -770,7 -917,9 +920,9 @@@ static void i915_gem_record_active_cont
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
                        ering->ctx = i915_error_object_create_sized(dev_priv,
-                                                                   obj, 1);
+                                                                   obj,
+                                                                   &dev_priv->gtt.base,
+                                                                   1);
                        break;
                }
        }
@@@ -791,14 -940,17 +943,17 @@@ static void i915_gem_record_rings(struc
  
                error->ring[i].valid = true;
  
-               i915_record_ring_state(dev, error, ring);
+               i915_record_ring_state(dev, ring, &error->ring[i]);
  
                error->ring[i].batchbuffer =
                        i915_error_first_batchbuffer(dev_priv, ring);
  
                error->ring[i].ringbuffer =
-                       i915_error_object_create(dev_priv, ring->obj);
+                       i915_error_ggtt_object_create(dev_priv, ring->obj);
  
+               if (ring->status_page.obj)
+                       error->ring[i].hws_page =
+                               i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
  
                i915_gem_record_active_context(ring, error, &error->ring[i]);
  
@@@ -845,7 -997,7 +1000,7 @@@ static void i915_gem_capture_vm(struct 
                i++;
        error->active_bo_count[ndx] = i;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-               if (obj->pin_count)
+               if (i915_gem_obj_is_pinned(obj))
                        i++;
        error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
  
@@@ -879,11 -1031,6 +1034,6 @@@ static void i915_gem_capture_buffers(st
        list_for_each_entry(vm, &dev_priv->vm_list, global_link)
                cnt++;
  
-       if (WARN(cnt > 1, "Multiple VMs not yet supported\n"))
-               cnt = 1;
-       vm = &dev_priv->gtt.base;
        error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
        error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
        error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
                i915_gem_capture_vm(dev_priv, error, vm, i++);
  }
  
+ /* Capture all registers which don't fit into another category. */
+ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
+                                  struct drm_i915_error_state *error)
+ {
+       struct drm_device *dev = dev_priv->dev;
+       int pipe;
+       /* General organization
+        * 1. Registers specific to a single generation
+        * 2. Registers which belong to multiple generations
+        * 3. Feature specific registers
+        * 4. Everything else
+        * Please try to follow the order.
+        */
+       /* 1: Registers specific to a single generation */
+       if (IS_VALLEYVIEW(dev)) {
+               error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+               error->forcewake = I915_READ(FORCEWAKE_VLV);
+       }
+       if (IS_GEN7(dev))
+               error->err_int = I915_READ(GEN7_ERR_INT);
+       if (IS_GEN6(dev)) {
+               error->forcewake = I915_READ(FORCEWAKE);
+               error->gab_ctl = I915_READ(GAB_CTL);
+               error->gfx_mode = I915_READ(GFX_MODE);
+       }
+       if (IS_GEN2(dev))
+               error->ier = I915_READ16(IER);
+       /* 2: Registers which belong to multiple generations */
+       if (INTEL_INFO(dev)->gen >= 7)
+               error->forcewake = I915_READ(FORCEWAKE_MT);
+       if (INTEL_INFO(dev)->gen >= 6) {
+               error->derrmr = I915_READ(DERRMR);
+               error->error = I915_READ(ERROR_GEN6);
+               error->done_reg = I915_READ(DONE_REG);
+       }
+       /* 3: Feature specific registers */
+       if (IS_GEN6(dev) || IS_GEN7(dev)) {
+               error->gam_ecochk = I915_READ(GAM_ECOCHK);
+               error->gac_eco = I915_READ(GAC_ECO_BITS);
+       }
+       /* 4: Everything else */
+       if (HAS_HW_CONTEXTS(dev))
+               error->ccid = I915_READ(CCID);
+       if (HAS_PCH_SPLIT(dev))
+               error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+       else {
+               error->ier = I915_READ(IER);
+               for_each_pipe(pipe)
+                       error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+       }
+       error->eir = I915_READ(EIR);
+       error->pgtbl_er = I915_READ(PGTBL_ER);
+       i915_get_extra_instdone(dev, error->extra_instdone);
+ }
  /**
   * i915_capture_error_state - capture an error record for later analysis
   * @dev: drm device
   */
  void i915_capture_error_state(struct drm_device *dev)
  {
+       static bool warned;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
        unsigned long flags;
-       int pipe;
+       uint32_t ecode;
  
        spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
        error = dev_priv->gpu_error.first_error;
  
        DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
                 dev->primary->index);
-       DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
-       DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
-       DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
-       DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
        kref_init(&error->ref);
-       error->eir = I915_READ(EIR);
-       error->pgtbl_er = I915_READ(PGTBL_ER);
-       if (HAS_HW_CONTEXTS(dev))
-               error->ccid = I915_READ(CCID);
-       if (HAS_PCH_SPLIT(dev))
-               error->ier = I915_READ(DEIER) | I915_READ(GTIER);
-       else if (IS_VALLEYVIEW(dev))
-               error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
-       else if (IS_GEN2(dev))
-               error->ier = I915_READ16(IER);
-       else
-               error->ier = I915_READ(IER);
-       if (INTEL_INFO(dev)->gen >= 6)
-               error->derrmr = I915_READ(DERRMR);
-       if (IS_VALLEYVIEW(dev))
-               error->forcewake = I915_READ(FORCEWAKE_VLV);
-       else if (INTEL_INFO(dev)->gen >= 7)
-               error->forcewake = I915_READ(FORCEWAKE_MT);
-       else if (INTEL_INFO(dev)->gen == 6)
-               error->forcewake = I915_READ(FORCEWAKE);
-       if (!HAS_PCH_SPLIT(dev))
-               for_each_pipe(pipe)
-                       error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
-       if (INTEL_INFO(dev)->gen >= 6) {
-               error->error = I915_READ(ERROR_GEN6);
-               error->done_reg = I915_READ(DONE_REG);
-       }
-       if (INTEL_INFO(dev)->gen == 7)
-               error->err_int = I915_READ(GEN7_ERR_INT);
-       i915_get_extra_instdone(dev, error->extra_instdone);
  
+       i915_capture_reg_state(dev_priv, error);
        i915_gem_capture_buffers(dev_priv, error);
        i915_gem_record_fences(dev, error);
        i915_gem_record_rings(dev, error);
+       ecode = i915_error_generate_code(dev_priv, error);
+       if (!warned) {
+               DRM_INFO("GPU HANG [%x]\n", ecode);
+               DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+               DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+               DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+               DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
+               warned = true;
+       }
  
        do_gettimeofday(&error->time);
  
index 9fec71175571e068cce957973431fe21eca962d2,8f579bc3b26da1e73b8a853fec5af27c650a4887..e9c94c91c6a59a40a45d11cccf90f9d9cfd2e637
@@@ -232,6 -232,18 +232,18 @@@ static bool cpt_can_enable_serr_int(str
        return true;
  }
  
+ static void i9xx_clear_fifo_underrun(struct drm_device *dev, enum pipe pipe)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 reg = PIPESTAT(pipe);
+       u32 pipestat = I915_READ(reg) & 0x7fff0000;
+       assert_spin_locked(&dev_priv->irq_lock);
+       I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+       POSTING_READ(reg);
+ }
  static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
                                                 enum pipe pipe, bool enable)
  {
@@@ -393,7 -405,9 +405,9 @@@ bool intel_set_cpu_fifo_underrun_report
  
        intel_crtc->cpu_fifo_underrun_disabled = !enable;
  
-       if (IS_GEN5(dev) || IS_GEN6(dev))
+       if (enable && (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)))
+               i9xx_clear_fifo_underrun(dev, pipe);
+       else if (IS_GEN5(dev) || IS_GEN6(dev))
                ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
        else if (IS_GEN7(dev))
                ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
@@@ -567,7 -581,8 +581,7 @@@ static u32 i915_get_vblank_counter(stru
  
                vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
        } else {
 -              enum transcoder cpu_transcoder =
 -                      intel_pipe_to_cpu_transcoder(dev_priv, pipe);
 +              enum transcoder cpu_transcoder = (enum transcoder) pipe;
                u32 htotal;
  
                htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
@@@ -915,6 -930,11 +929,11 @@@ static void i915_hotplug_work_func(stru
                drm_kms_helper_hotplug_event(dev);
  }
  
+ static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
+ {
+       del_timer_sync(&dev_priv->hotplug_reenable_timer);
+ }
  static void ironlake_rps_change_irq_handler(struct drm_device *dev)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@@ -966,6 -986,43 +985,43 @@@ static void notify_ring(struct drm_devi
        i915_queue_hangcheck(dev);
  }
  
+ void gen6_set_pm_mask(struct drm_i915_private *dev_priv,
+                            u32 pm_iir, int new_delay)
+ {
+       if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+               if (new_delay >= dev_priv->rps.max_delay) {
+                       /* Mask UP THRESHOLD Interrupts */
+                       I915_WRITE(GEN6_PMINTRMSK,
+                                  I915_READ(GEN6_PMINTRMSK) |
+                                  GEN6_PM_RP_UP_THRESHOLD);
+                       dev_priv->rps.rp_up_masked = true;
+               }
+               if (dev_priv->rps.rp_down_masked) {
+                       /* UnMask DOWN THRESHOLD Interrupts */
+                       I915_WRITE(GEN6_PMINTRMSK,
+                                  I915_READ(GEN6_PMINTRMSK) &
+                                  ~GEN6_PM_RP_DOWN_THRESHOLD);
+                       dev_priv->rps.rp_down_masked = false;
+               }
+       } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+               if (new_delay <= dev_priv->rps.min_delay) {
+                       /* Mask DOWN THRESHOLD Interrupts */
+                       I915_WRITE(GEN6_PMINTRMSK,
+                                  I915_READ(GEN6_PMINTRMSK) |
+                                  GEN6_PM_RP_DOWN_THRESHOLD);
+                       dev_priv->rps.rp_down_masked = true;
+               }
+               if (dev_priv->rps.rp_up_masked) {
+                       /* UnMask UP THRESHOLD Interrupts */
+                       I915_WRITE(GEN6_PMINTRMSK,
+                                  I915_READ(GEN6_PMINTRMSK) &
+                                  ~GEN6_PM_RP_UP_THRESHOLD);
+                       dev_priv->rps.rp_up_masked = false;
+               }
+       }
+ }
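
gen6_set_pm_mask() above implements simple hysteresis: once the requested frequency pegs at the maximum, further up-threshold interrupts carry no information and are masked until a down event re-arms them (and symmetrically at the minimum). A sketch of that state machine, with all names invented for illustration:

#include <stdbool.h>
#include <stdio.h>

struct rps {
	int min, max;
	bool up_masked, down_masked;
};

static void set_pm_mask(struct rps *r, bool up_event, int new_delay)
{
	if (up_event) {
		if (new_delay >= r->max)
			r->up_masked = true;	/* nothing above max to ask for */
		r->down_masked = false;		/* re-arm the other direction */
	} else {
		if (new_delay <= r->min)
			r->down_masked = true;
		r->up_masked = false;
	}
}

int main(void)
{
	struct rps r = { .min = 3, .max = 10 };

	set_pm_mask(&r, true, 10);
	printf("up masked: %d\n", r.up_masked);	/* 1: pegged at max */
	set_pm_mask(&r, false, 9);
	printf("up masked: %d\n", r.up_masked);	/* 0: re-armed by down event */
	return 0;
}
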
  static void gen6_pm_rps_work(struct work_struct *work)
  {
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
         */
        new_delay = clamp_t(int, new_delay,
                            dev_priv->rps.min_delay, dev_priv->rps.max_delay);
+       gen6_set_pm_mask(dev_priv, pm_iir, new_delay);
        dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
  
        if (IS_VALLEYVIEW(dev_priv->dev))
@@@ -1236,6 -1295,9 +1294,9 @@@ static inline void intel_hpd_irq_handle
        if (!hotplug_trigger)
                return;
  
+       DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+                         hotplug_trigger);
        spin_lock(&dev_priv->irq_lock);
        for (i = 1; i < HPD_NUM_PINS; i++) {
  
@@@ -1415,17 -1477,52 +1476,52 @@@ static void gen6_rps_irq_handler(struc
        }
  }
  
+ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 pipe_stats[I915_MAX_PIPES];
+       int pipe;
+       spin_lock(&dev_priv->irq_lock);
+       for_each_pipe(pipe) {
+               int reg = PIPESTAT(pipe);
+               pipe_stats[pipe] = I915_READ(reg);
+               /*
+                * Clear the PIPE*STAT regs before the IIR
+                */
+               if (pipe_stats[pipe] & 0x8000ffff)
+                       I915_WRITE(reg, pipe_stats[pipe]);
+       }
+       spin_unlock(&dev_priv->irq_lock);
+       for_each_pipe(pipe) {
+               if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
+                       drm_handle_vblank(dev, pipe);
+               if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
+                       intel_prepare_page_flip(dev, pipe);
+                       intel_finish_page_flip(dev, pipe);
+               }
+               if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+                       i9xx_pipe_crc_irq_handler(dev, pipe);
+               if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+                   intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+                       DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+       }
+       if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+               gmbus_irq_handler(dev);
+ }
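
The factored-out handler above follows a common interrupt pattern: snapshot and clear the PIPESTAT registers while holding the irq lock, then process the snapshot with the lock dropped. A minimal pthread rendition of the same two-phase shape (the driver uses a spinlock and real registers):

#include <pthread.h>
#include <stdio.h>

#define NPIPES 3

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int hw_status[NPIPES] = { 0x1, 0x0, 0x5 };	/* fake regs */

int main(void)
{
	unsigned int pipe_stats[NPIPES];
	int pipe;

	pthread_mutex_lock(&irq_lock);
	for (pipe = 0; pipe < NPIPES; pipe++) {
		pipe_stats[pipe] = hw_status[pipe];
		hw_status[pipe] = 0;	/* "write back to clear", as the hw does */
	}
	pthread_mutex_unlock(&irq_lock);

	for (pipe = 0; pipe < NPIPES; pipe++)	/* handle events lock-free */
		if (pipe_stats[pipe])
			printf("pipe %c: events %#x\n",
			       'A' + pipe, pipe_stats[pipe]);
	return 0;
}
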
  static irqreturn_t valleyview_irq_handler(int irq, void *arg)
  {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir, gt_iir, pm_iir;
        irqreturn_t ret = IRQ_NONE;
-       unsigned long irqflags;
-       int pipe;
-       u32 pipe_stats[I915_MAX_PIPES];
-       atomic_inc(&dev_priv->irq_received);
  
        while (true) {
                iir = I915_READ(VLV_IIR);
  
                snb_gt_irq_handler(dev, dev_priv, gt_iir);
  
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-               for_each_pipe(pipe) {
-                       int reg = PIPESTAT(pipe);
-                       pipe_stats[pipe] = I915_READ(reg);
-                       /*
-                        * Clear the PIPE*STAT regs before the IIR
-                        */
-                       if (pipe_stats[pipe] & 0x8000ffff) {
-                               if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
-                                       DRM_DEBUG_DRIVER("pipe %c underrun\n",
-                                                        pipe_name(pipe));
-                               I915_WRITE(reg, pipe_stats[pipe]);
-                       }
-               }
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-               for_each_pipe(pipe) {
-                       if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
-                               drm_handle_vblank(dev, pipe);
-                       if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
-                               intel_prepare_page_flip(dev, pipe);
-                               intel_finish_page_flip(dev, pipe);
-                       }
-                       if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
-                               i9xx_pipe_crc_irq_handler(dev, pipe);
-               }
+               valleyview_pipestat_irq_handler(dev, iir);
  
                /* Consume port.  Then clear IIR or we'll miss events */
                if (iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
                        u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
  
-                       DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
-                                        hotplug_status);
                        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
  
                        if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
                        I915_READ(PORT_HOTPLUG_STAT);
                }
  
-               if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
-                       gmbus_irq_handler(dev);
  
                if (pm_iir)
                        gen6_rps_irq_handler(dev_priv, pm_iir);
@@@ -1546,12 -1610,12 +1609,12 @@@ static void ibx_irq_handler(struct drm_
        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
                                                          false))
-                       DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
+                       DRM_ERROR("PCH transcoder A FIFO underrun\n");
  
        if (pch_iir & SDE_TRANSB_FIFO_UNDER)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
                                                          false))
-                       DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
+                       DRM_ERROR("PCH transcoder B FIFO underrun\n");
  }
  
  static void ivb_err_int_handler(struct drm_device *dev)
                if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
                        if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
                                                                  false))
-                               DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
-                                                pipe_name(pipe));
+                               DRM_ERROR("Pipe %c FIFO underrun\n",
+                                         pipe_name(pipe));
                }
  
                if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
@@@ -1593,17 -1657,17 +1656,17 @@@ static void cpt_serr_int_handler(struc
        if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
                                                          false))
-                       DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
+                       DRM_ERROR("PCH transcoder A FIFO underrun\n");
  
        if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
                                                          false))
-                       DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
+                       DRM_ERROR("PCH transcoder B FIFO underrun\n");
  
        if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
                                                          false))
-                       DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
+                       DRM_ERROR("PCH transcoder C FIFO underrun\n");
  
        I915_WRITE(SERR_INT, serr_int);
  }
@@@ -1665,8 -1729,8 +1728,8 @@@ static void ilk_display_irq_handler(str
  
                if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
                        if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
-                               DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
-                                                pipe_name(pipe));
+                               DRM_ERROR("Pipe %c FIFO underrun\n",
+                                         pipe_name(pipe));
  
                if (de_iir & DE_PIPE_CRC_DONE(pipe))
                        i9xx_pipe_crc_irq_handler(dev, pipe);
@@@ -1738,8 -1802,6 +1801,6 @@@ static irqreturn_t ironlake_irq_handler
        u32 de_iir, gt_iir, de_ier, sde_ier = 0;
        irqreturn_t ret = IRQ_NONE;
  
-       atomic_inc(&dev_priv->irq_received);
        /* We get interrupts on unclaimed registers, so check for this before we
         * do any I915_{READ,WRITE}. */
        intel_uncore_check_errors(dev);
@@@ -1808,8 -1870,6 +1869,6 @@@ static irqreturn_t gen8_irq_handler(in
        uint32_t tmp = 0;
        enum pipe pipe;
  
-       atomic_inc(&dev_priv->irq_received);
        master_ctl = I915_READ(GEN8_MASTER_IRQ);
        master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
        if (!master_ctl)
                if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
                        if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
                                                                  false))
-                               DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
-                                                pipe_name(pipe));
+                               DRM_ERROR("Pipe %c FIFO underrun\n",
+                                         pipe_name(pipe));
                }
  
                if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
@@@ -2244,18 -2304,11 +2303,11 @@@ static int valleyview_enable_vblank(str
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
-       u32 imr;
  
        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;
  
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       imr = I915_READ(VLV_IMR);
-       if (pipe == PIPE_A)
-               imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
-       else
-               imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-       I915_WRITE(VLV_IMR, imr);
        i915_enable_pipestat(dev_priv, pipe,
                             PIPE_START_VBLANK_INTERRUPT_ENABLE);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@@ -2313,17 -2366,10 +2365,10 @@@ static void valleyview_disable_vblank(s
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
-       u32 imr;
  
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        i915_disable_pipestat(dev_priv, pipe,
                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
-       imr = I915_READ(VLV_IMR);
-       if (pipe == PIPE_A)
-               imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
-       else
-               imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-       I915_WRITE(VLV_IMR, imr);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  }
  
@@@ -2479,9 -2525,8 +2524,8 @@@ static void i915_hangcheck_elapsed(unsi
  #define BUSY 1
  #define KICK 5
  #define HUNG 20
- #define FIRE 30
  
-       if (!i915_enable_hangcheck)
+       if (!i915.enable_hangcheck)
                return;
  
        for_each_ring(ring, dev_priv, i) {
        }
  
        for_each_ring(ring, dev_priv, i) {
-               if (ring->hangcheck.score > FIRE) {
+               if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
                        DRM_INFO("%s on %s\n",
                                 stuck[i] ? "stuck" : "no progress",
                                 ring->name);
  void i915_queue_hangcheck(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       if (!i915_enable_hangcheck)
+       if (!i915.enable_hangcheck)
                return;
  
        mod_timer(&dev_priv->gpu_error.hangcheck_timer,
@@@ -2632,8 -2677,6 +2676,6 @@@ static void ironlake_irq_preinstall(str
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  
-       atomic_set(&dev_priv->irq_received, 0);
        I915_WRITE(HWSTAM, 0xeffe);
  
        I915_WRITE(DEIMR, 0xffffffff);
@@@ -2650,8 -2693,6 +2692,6 @@@ static void valleyview_irq_preinstall(s
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
  
-       atomic_set(&dev_priv->irq_received, 0);
        /* VLV magic */
        I915_WRITE(VLV_IMR, 0);
        I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
@@@ -2681,8 -2722,6 +2721,6 @@@ static void gen8_irq_preinstall(struct 
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
  
-       atomic_set(&dev_priv->irq_received, 0);
        I915_WRITE(GEN8_MASTER_IRQ, 0);
        POSTING_READ(GEN8_MASTER_IRQ);
  
@@@ -3007,8 -3046,6 +3045,6 @@@ static void gen8_irq_uninstall(struct d
        if (!dev_priv)
                return;
  
-       atomic_set(&dev_priv->irq_received, 0);
        I915_WRITE(GEN8_MASTER_IRQ, 0);
  
  #define GEN8_IRQ_FINI_NDX(type, which) do { \
@@@ -3049,7 -3086,7 +3085,7 @@@ static void valleyview_irq_uninstall(st
        if (!dev_priv)
                return;
  
-       del_timer_sync(&dev_priv->hotplug_reenable_timer);
+       intel_hpd_irq_uninstall(dev_priv);
  
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0xffff);
@@@ -3072,7 -3109,7 +3108,7 @@@ static void ironlake_irq_uninstall(stru
        if (!dev_priv)
                return;
  
-       del_timer_sync(&dev_priv->hotplug_reenable_timer);
+       intel_hpd_irq_uninstall(dev_priv);
  
        I915_WRITE(HWSTAM, 0xffffffff);
  
@@@ -3101,8 -3138,6 +3137,6 @@@ static void i8xx_irq_preinstall(struct 
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
  
-       atomic_set(&dev_priv->irq_received, 0);
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0);
        I915_WRITE16(IMR, 0xffff);
@@@ -3187,8 -3222,6 +3221,6 @@@ static irqreturn_t i8xx_irq_handler(in
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  
-       atomic_inc(&dev_priv->irq_received);
        iir = I915_READ16(IIR);
        if (iir == 0)
                return IRQ_NONE;
                        /*
                         * Clear the PIPE*STAT regs before the IIR
                         */
-                       if (pipe_stats[pipe] & 0x8000ffff) {
-                               if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
-                                       DRM_DEBUG_DRIVER("pipe %c underrun\n",
-                                                        pipe_name(pipe));
+                       if (pipe_stats[pipe] & 0x8000ffff)
                                I915_WRITE(reg, pipe_stats[pipe]);
-                       }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  
  
                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                                i9xx_pipe_crc_irq_handler(dev, pipe);
+                       if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+                           intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+                               DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
                }
  
                iir = new_iir;
@@@ -3266,8 -3299,6 +3298,6 @@@ static void i915_irq_preinstall(struct 
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
  
-       atomic_set(&dev_priv->irq_received, 0);
        if (I915_HAS_HOTPLUG(dev)) {
                I915_WRITE(PORT_HOTPLUG_EN, 0);
                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@@ -3373,8 -3404,6 +3403,6 @@@ static irqreturn_t i915_irq_handler(in
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
        int pipe, ret = IRQ_NONE;
  
-       atomic_inc(&dev_priv->irq_received);
        iir = I915_READ(IIR);
        do {
                bool irq_received = (iir & ~flip_mask) != 0;
  
                        /* Clear the PIPE*STAT regs before the IIR */
                        if (pipe_stats[pipe] & 0x8000ffff) {
-                               if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
-                                       DRM_DEBUG_DRIVER("pipe %c underrun\n",
-                                                        pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                                irq_received = true;
                        }
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
                        u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
  
-                       DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
-                                 hotplug_status);
                        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
  
                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
  
                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                                i9xx_pipe_crc_irq_handler(dev, pipe);
+                       if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+                           intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+                               DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
                }
  
                if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@@ -3476,7 -3503,7 +3502,7 @@@ static void i915_irq_uninstall(struct d
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
  
-       del_timer_sync(&dev_priv->hotplug_reenable_timer);
+       intel_hpd_irq_uninstall(dev_priv);
  
        if (I915_HAS_HOTPLUG(dev)) {
                I915_WRITE(PORT_HOTPLUG_EN, 0);
@@@ -3500,8 -3527,6 +3526,6 @@@ static void i965_irq_preinstall(struct 
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
  
-       atomic_set(&dev_priv->irq_received, 0);
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
  
@@@ -3610,21 -3635,17 +3634,17 @@@ static irqreturn_t i965_irq_handler(in
        u32 iir, new_iir;
        u32 pipe_stats[I915_MAX_PIPES];
        unsigned long irqflags;
-       int irq_received;
        int ret = IRQ_NONE, pipe;
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
  
-       atomic_inc(&dev_priv->irq_received);
        iir = I915_READ(IIR);
  
        for (;;) {
+               bool irq_received = (iir & ~flip_mask) != 0;
                bool blc_event = false;
  
-               irq_received = (iir & ~flip_mask) != 0;
                /* Can't rely on pipestat interrupt bit in iir as it might
                 * have been cleared after the pipestat interrupt was received.
                 * It doesn't set the bit in iir again, but it still produces
                         * Clear the PIPE*STAT regs before the IIR
                         */
                        if (pipe_stats[pipe] & 0x8000ffff) {
-                               if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
-                                       DRM_DEBUG_DRIVER("pipe %c underrun\n",
-                                                        pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
-                               irq_received = 1;
+                               irq_received = true;
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
                                                                  HOTPLUG_INT_STATUS_G4X :
                                                                  HOTPLUG_INT_STATUS_I915);
  
-                       DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
-                                 hotplug_status);
                        intel_hpd_irq_handler(dev, hotplug_trigger,
                                              IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
  
  
                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                                i9xx_pipe_crc_irq_handler(dev, pipe);
-               }
  
+                       if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
+                           intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
+                               DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+               }
  
                if (blc_event || (iir & I915_ASLE_INTERRUPT))
                        intel_opregion_asle_intr(dev);
@@@ -3735,7 -3753,7 +3752,7 @@@ static void i965_irq_uninstall(struct d
        if (!dev_priv)
                return;
  
-       del_timer_sync(&dev_priv->hotplug_reenable_timer);
+       intel_hpd_irq_uninstall(dev_priv);
  
        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        I915_WRITE(IIR, I915_READ(IIR));
  }
  
- static void i915_reenable_hotplug_timer_func(unsigned long data)
+ static void intel_hpd_irq_reenable(unsigned long data)
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
        struct drm_device *dev = dev_priv->dev;
@@@ -3799,7 -3817,7 +3816,7 @@@ void intel_irq_init(struct drm_device *
        setup_timer(&dev_priv->gpu_error.hangcheck_timer,
                    i915_hangcheck_elapsed,
                    (unsigned long) dev);
-       setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
+       setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
                    (unsigned long) dev_priv);
  
        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
index 4c1672809493c795714bc3eddaa1238c258451ed,4d4a0d912b8eb25c9afcefd2f8aec18a6780af60..0f4cbd0aa59e03cd2ea1030d09069a97ee5cd25e
@@@ -2372,7 -2372,7 +2372,7 @@@ intel_pipe_set_base(struct drm_crtc *cr
         * whether the platform allows pfit disable with pipe active, and only
         * then update the pipesrc and pfit state, even on the flip path.
         */
-       if (i915_fastboot) {
+       if (i915.fastboot) {
                const struct drm_display_mode *adjusted_mode =
                        &intel_crtc->config.adjusted_mode;
  
@@@ -4088,9 -4088,8 +4088,8 @@@ static int valleyview_calc_cdclk(struc
        /* Looks like the 200MHz CDclk freq doesn't work on some configs */
  }
  
- static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv,
-                                unsigned modeset_pipes,
-                                struct intel_crtc_config *pipe_config)
+ /* compute the max pixel clock for the new configuration */
+ static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
  {
        struct drm_device *dev = dev_priv->dev;
        struct intel_crtc *intel_crtc;
  
        list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
                            base.head) {
-               if (modeset_pipes & (1 << intel_crtc->pipe))
-                       max_pixclk = max(max_pixclk,
-                                        pipe_config->adjusted_mode.crtc_clock);
-               else if (intel_crtc->base.enabled)
+               if (intel_crtc->new_enabled)
                        max_pixclk = max(max_pixclk,
-                                        intel_crtc->config.adjusted_mode.crtc_clock);
+                                        intel_crtc->new_config->adjusted_mode.crtc_clock);
        }
  
        return max_pixclk;
  }
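  /*
   * [editor's note] The scan now walks the staged state (new_enabled /
   * new_config) instead of mixing the in-flight pipe_config with the
   * current one, which is why the modeset_pipes/pipe_config parameters
   * could be dropped here and in the two callers below.
   */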
  
  static void valleyview_modeset_global_pipes(struct drm_device *dev,
-                                           unsigned *prepare_pipes,
-                                           unsigned modeset_pipes,
-                                           struct intel_crtc_config *pipe_config)
+                                           unsigned *prepare_pipes)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc;
-       int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes,
-                                              pipe_config);
+       int max_pixclk = intel_mode_max_pixclk(dev_priv);
        int cur_cdclk = valleyview_cur_cdclk(dev_priv);
  
        if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk)
                return;
  
+       /* disable/enable all currently active pipes while we change cdclk */
        list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
                            base.head)
                if (intel_crtc->base.enabled)
  static void valleyview_modeset_global_resources(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL);
+       int max_pixclk = intel_mode_max_pixclk(dev_priv);
        int cur_cdclk = valleyview_cur_cdclk(dev_priv);
        int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
  
@@@ -4176,6 -4170,7 +4170,7 @@@ static void valleyview_crtc_enable(stru
  
        intel_update_watermarks(crtc);
        intel_enable_pipe(dev_priv, pipe, false, is_dsi);
+       intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
        intel_enable_primary_plane(dev_priv, plane, pipe);
        intel_enable_planes(crtc);
        intel_crtc_update_cursor(crtc, true);
@@@ -4214,6 -4209,7 +4209,7 @@@ static void i9xx_crtc_enable(struct drm
  
        intel_update_watermarks(crtc);
        intel_enable_pipe(dev_priv, pipe, false, false);
+       intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
        intel_enable_primary_plane(dev_priv, plane, pipe);
        intel_enable_planes(crtc);
        /* The fixup needs to happen before cursor is enabled */
@@@ -4272,6 -4268,7 +4268,7 @@@ static void i9xx_crtc_disable(struct dr
        intel_disable_planes(crtc);
        intel_disable_primary_plane(dev_priv, plane, pipe);
  
+       intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
        intel_disable_pipe(dev_priv, pipe);
  
        i9xx_pfit_disable(intel_crtc);
@@@ -4583,7 -4580,7 +4580,7 @@@ retry
  static void hsw_compute_ips_config(struct intel_crtc *crtc,
                                   struct intel_crtc_config *pipe_config)
  {
-       pipe_config->ips_enabled = i915_enable_ips &&
+       pipe_config->ips_enabled = i915.enable_ips &&
                                   hsw_crtc_supports_ips(crtc) &&
                                   pipe_config->pipe_bpp <= 24;
  }
@@@ -4784,8 -4781,8 +4781,8 @@@ intel_link_compute_m_n(int bits_per_pix
  
  static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
  {
-       if (i915_panel_use_ssc >= 0)
-               return i915_panel_use_ssc != 0;
+       if (i915.panel_use_ssc >= 0)
+               return i915.panel_use_ssc != 0;
        return dev_priv->vbt.lvds_use_ssc
                && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
  }
@@@ -4844,7 -4841,7 +4841,7 @@@ static void i9xx_update_pll_dividers(st
  
        crtc->lowfreq_avail = false;
        if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
-           reduced_clock && i915_powersave) {
+           reduced_clock && i915.powersave) {
                I915_WRITE(FP1(pipe), fp2);
                crtc->config.dpll_hw_state.fp1 = fp2;
                crtc->lowfreq_avail = true;
@@@ -6348,7 -6345,7 +6345,7 @@@ static int ironlake_crtc_mode_set(struc
        if (intel_crtc->config.has_dp_encoder)
                intel_dp_set_m_n(intel_crtc);
  
-       if (is_lvds && has_reduced_clock && i915_powersave)
+       if (is_lvds && has_reduced_clock && i915.powersave)
                intel_crtc->lowfreq_avail = true;
        else
                intel_crtc->lowfreq_avail = false;
@@@ -6716,7 -6713,7 +6713,7 @@@ static void __hsw_enable_package_c8(str
                return;
  
        schedule_delayed_work(&dev_priv->pc8.enable_work,
-                             msecs_to_jiffies(i915_pc8_timeout));
+                             msecs_to_jiffies(i915.pc8_timeout));
  }
  
  static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
@@@ -6815,7 -6812,7 +6812,7 @@@ static void hsw_update_package_c8(struc
        if (!HAS_PC8(dev_priv->dev))
                return;
  
-       if (!i915_enable_pc8)
+       if (!i915.enable_pc8)
                return;
  
        mutex_lock(&dev_priv->pc8.lock);
@@@ -7855,6 -7852,8 +7852,8 @@@ bool intel_get_load_detect_pipe(struct 
        to_intel_connector(connector)->new_encoder = intel_encoder;
  
        intel_crtc = to_intel_crtc(crtc);
+       intel_crtc->new_enabled = true;
+       intel_crtc->new_config = &intel_crtc->config;
        old->dpms_mode = connector->dpms;
        old->load_detect_temp = true;
        old->release_fb = NULL;
                DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
        if (IS_ERR(fb)) {
                DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
-               mutex_unlock(&crtc->mutex);
-               return false;
+               goto fail;
        }
  
        if (intel_set_mode(crtc, mode, 0, 0, fb)) {
                DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
                if (old->release_fb)
                        old->release_fb->funcs->destroy(old->release_fb);
-               mutex_unlock(&crtc->mutex);
-               return false;
+               goto fail;
        }
  
        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev, intel_crtc->pipe);
        return true;
+  fail:
+       intel_crtc->new_enabled = crtc->enabled;
+       if (intel_crtc->new_enabled)
+               intel_crtc->new_config = &intel_crtc->config;
+       else
+               intel_crtc->new_config = NULL;
+       mutex_unlock(&crtc->mutex);
+       return false;
  }
  
  void intel_release_load_detect_pipe(struct drm_connector *connector,
                intel_attached_encoder(connector);
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = encoder->crtc;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                      connector->base.id, drm_get_connector_name(connector),
        if (old->load_detect_temp) {
                to_intel_connector(connector)->new_encoder = NULL;
                intel_encoder->new_crtc = NULL;
+               intel_crtc->new_enabled = false;
+               intel_crtc->new_config = NULL;
                intel_set_mode(crtc, NULL, 0, 0, NULL);
  
                if (old->release_fb) {
@@@ -8201,7 -8210,7 +8210,7 @@@ void intel_mark_idle(struct drm_device 
  
        hsw_package_c8_gpu_idle(dev_priv);
  
-       if (!i915_powersave)
+       if (!i915.powersave)
                return;
  
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@@ -8221,7 -8230,7 +8230,7 @@@ void intel_mark_fb_busy(struct drm_i915
        struct drm_device *dev = obj->base.dev;
        struct drm_crtc *crtc;
  
-       if (!i915_powersave)
+       if (!i915.powersave)
                return;
  
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@@ -8586,20 -8595,6 +8595,20 @@@ static int intel_gen7_queue_flip(struc
        if (ring->id == RCS)
                len += 6;
  
 +      /*
 +       * BSpec MI_DISPLAY_FLIP for IVB:
 +       * "The full packet must be contained within the same cache line."
 +       *
 +       * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
 +       * cacheline, if we ever start emitting more commands before
 +       * the MI_DISPLAY_FLIP we may need to first emit everything else,
 +       * then do the cacheline alignment, and finally emit the
 +       * MI_DISPLAY_FLIP.
 +       */
 +      ret = intel_ring_cacheline_align(ring);
 +      if (ret)
 +              goto err_unpin;
 +
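        /*
         * [editor's note] intel_ring_cacheline_align() itself lands in
         * intel_ringbuffer.c (not shown in this excerpt); a rough sketch of
         * the idea, assuming 64-byte cachelines and the usual ring helpers:
         *
         *   int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
         *   {
         *           int num_dwords =
         *                   (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
         *           int ret;
         *
         *           if (num_dwords == 0)
         *                   return 0;
         *
         *           num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
         *           ret = intel_ring_begin(ring, num_dwords);
         *           if (ret)
         *                   return ret;
         *
         *           while (num_dwords--)
         *                   intel_ring_emit(ring, MI_NOOP);
         *           intel_ring_advance(ring);
         *
         *           return 0;
         *   }
         */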
        ret = intel_ring_begin(ring, len);
        if (ret)
                goto err_unpin;
@@@ -8766,6 -8761,7 +8775,7 @@@ static struct drm_crtc_helper_funcs int
   */
  static void intel_modeset_update_staged_output_state(struct drm_device *dev)
  {
+       struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
  
                encoder->new_crtc =
                        to_intel_crtc(encoder->base.crtc);
        }
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+                           base.head) {
+               crtc->new_enabled = crtc->base.enabled;
+               if (crtc->new_enabled)
+                       crtc->new_config = &crtc->config;
+               else
+                       crtc->new_config = NULL;
+       }
  }
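  /*
   * [editor's note] Invariant staged here and relied on below:
   * crtc->new_config points at &crtc->config exactly when crtc->new_enabled
   * is set, and is NULL otherwise; the WARN_ONs added to
   * intel_modeset_update_state() further down double-check it after every
   * modeset.
   */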
  
  /**
   */
  static void intel_modeset_commit_output_state(struct drm_device *dev)
  {
+       struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
  
                            base.head) {
                encoder->base.crtc = &encoder->new_crtc->base;
        }
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+                           base.head) {
+               crtc->base.enabled = crtc->new_enabled;
+       }
  }
  
  static void
@@@ -9127,29 -9139,22 +9153,22 @@@ intel_modeset_affected_pipes(struct drm
                        *prepare_pipes |= 1 << encoder->new_crtc->pipe;
        }
  
-       /* Check for any pipes that will be fully disabled ... */
+       /* Check for pipes that will be enabled/disabled ... */
        list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
                            base.head) {
-               bool used = false;
-               /* Don't try to disable disabled crtcs. */
-               if (!intel_crtc->base.enabled)
+               if (intel_crtc->base.enabled == intel_crtc->new_enabled)
                        continue;
  
-               list_for_each_entry(encoder, &dev->mode_config.encoder_list,
-                                   base.head) {
-                       if (encoder->new_crtc == intel_crtc)
-                               used = true;
-               }
-               if (!used)
+               if (!intel_crtc->new_enabled)
                        *disable_pipes |= 1 << intel_crtc->pipe;
+               else
+                       *prepare_pipes |= 1 << intel_crtc->pipe;
        }
  
  
        /* set_mode is also used to update properties on live display pipes. */
        intel_crtc = to_intel_crtc(crtc);
-       if (crtc->enabled)
+       if (intel_crtc->new_enabled)
                *prepare_pipes |= 1 << intel_crtc->pipe;
  
        /*
@@@ -9208,10 -9213,13 +9227,13 @@@ intel_modeset_update_state(struct drm_d
  
        intel_modeset_commit_output_state(dev);
  
-       /* Update computed state. */
+       /* Double check state. */
        list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
                            base.head) {
-               intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
+               WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base));
+               WARN_ON(intel_crtc->new_config &&
+                       intel_crtc->new_config != &intel_crtc->config);
+               WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config);
        }
  
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@@ -9380,10 -9388,8 +9402,8 @@@ intel_pipe_config_compare(struct drm_de
        if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
                PIPE_CONF_CHECK_I(pipe_bpp);
  
-       if (!HAS_DDI(dev)) {
-               PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
-               PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
-       }
+       PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
+       PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
  
  #undef PIPE_CONF_CHECK_X
  #undef PIPE_CONF_CHECK_I
@@@ -9643,6 -9649,7 +9663,7 @@@ static int __intel_set_mode(struct drm_
                }
                intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
                                       "[modeset]");
+               to_intel_crtc(crtc)->new_config = pipe_config;
        }
  
        /*
         * adjusted_mode bits in the crtc directly.
         */
        if (IS_VALLEYVIEW(dev)) {
-               valleyview_modeset_global_pipes(dev, &prepare_pipes,
-                                               modeset_pipes, pipe_config);
+               valleyview_modeset_global_pipes(dev, &prepare_pipes);
  
                /* may have added more to prepare_pipes than we should */
                prepare_pipes &= ~disable_pipes;
                /* mode_set/enable/disable functions rely on a correct pipe
                 * config. */
                to_intel_crtc(crtc)->config = *pipe_config;
+               to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config;
  
                /*
                 * Calculate and store various constants which
@@@ -9746,16 -9753,24 +9767,24 @@@ static void intel_set_config_free(struc
  
        kfree(config->save_connector_encoders);
        kfree(config->save_encoder_crtcs);
+       kfree(config->save_crtc_enabled);
        kfree(config);
  }
  
  static int intel_set_config_save_state(struct drm_device *dev,
                                       struct intel_set_config *config)
  {
+       struct drm_crtc *crtc;
        struct drm_encoder *encoder;
        struct drm_connector *connector;
        int count;
  
+       config->save_crtc_enabled =
+               kcalloc(dev->mode_config.num_crtc,
+                       sizeof(bool), GFP_KERNEL);
+       if (!config->save_crtc_enabled)
+               return -ENOMEM;
        config->save_encoder_crtcs =
                kcalloc(dev->mode_config.num_encoder,
                        sizeof(struct drm_crtc *), GFP_KERNEL);
         * Should anything bad happen only the expected state is
         * restored, not the driver's personal bookkeeping.
         */
+       count = 0;
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               config->save_crtc_enabled[count++] = crtc->enabled;
+       }
        count = 0;
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                config->save_encoder_crtcs[count++] = encoder->crtc;
  static void intel_set_config_restore_state(struct drm_device *dev,
                                           struct intel_set_config *config)
  {
+       struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
        int count;
  
+       count = 0;
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+               crtc->new_enabled = config->save_crtc_enabled[count++];
+               if (crtc->new_enabled)
+                       crtc->new_config = &crtc->config;
+               else
+                       crtc->new_config = NULL;
+       }
        count = 0;
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
                encoder->new_crtc =
@@@ -9840,7 -9871,7 +9885,7 @@@ intel_set_config_compute_mode_changes(s
                        struct intel_crtc *intel_crtc =
                                to_intel_crtc(set->crtc);
  
-                       if (intel_crtc->active && i915_fastboot) {
+                       if (intel_crtc->active && i915.fastboot) {
                                DRM_DEBUG_KMS("crtc has no fb, will flip\n");
                                config->fb_changed = true;
                        } else {
@@@ -9876,9 -9907,9 +9921,9 @@@ intel_modeset_stage_output_state(struc
                                 struct drm_mode_set *set,
                                 struct intel_set_config *config)
  {
-       struct drm_crtc *new_crtc;
        struct intel_connector *connector;
        struct intel_encoder *encoder;
+       struct intel_crtc *crtc;
        int ro;
  
        /* The upper layers ensure that we either disable a crtc or have a list
        /* Update crtc of enabled connectors. */
        list_for_each_entry(connector, &dev->mode_config.connector_list,
                            base.head) {
+               struct drm_crtc *new_crtc;
                if (!connector->new_encoder)
                        continue;
  
        }
        /* Now we've also updated encoder->new_crtc for all encoders. */
  
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+                           base.head) {
+               crtc->new_enabled = false;
+               list_for_each_entry(encoder,
+                                   &dev->mode_config.encoder_list,
+                                   base.head) {
+                       if (encoder->new_crtc == crtc) {
+                               crtc->new_enabled = true;
+                               break;
+                       }
+               }
+               if (crtc->new_enabled != crtc->base.enabled) {
+                       DRM_DEBUG_KMS("crtc %sabled, full mode switch\n",
+                                     crtc->new_enabled ? "en" : "dis");
+                       config->mode_changed = true;
+               }
+               if (crtc->new_enabled)
+                       crtc->new_config = &crtc->config;
+               else
+                       crtc->new_config = NULL;
+       }
        return 0;
  }
  
+ static void disable_crtc_nofb(struct intel_crtc *crtc)
+ {
+       struct drm_device *dev = crtc->base.dev;
+       struct intel_encoder *encoder;
+       struct intel_connector *connector;
+       DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
+                     pipe_name(crtc->pipe));
+       list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
+               if (connector->new_encoder &&
+                   connector->new_encoder->new_crtc == crtc)
+                       connector->new_encoder = NULL;
+       }
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+               if (encoder->new_crtc == crtc)
+                       encoder->new_crtc = NULL;
+       }
+       crtc->new_enabled = false;
+       crtc->new_config = NULL;
+ }
  static int intel_crtc_set_config(struct drm_mode_set *set)
  {
        struct drm_device *dev;
                 * flipping, so increasing its cost here shouldn't be a big
                 * deal).
                 */
-               if (i915_fastboot && ret == 0)
+               if (i915.fastboot && ret == 0)
                        intel_modeset_check_state(set->crtc->dev);
        }
  
  fail:
                intel_set_config_restore_state(dev, config);
  
+               /*
+                * HACK: if the pipe was on, but we didn't have a framebuffer,
+                * force the pipe off to avoid oopsing in the modeset code
+                * due to fb==NULL. This should only happen during boot since
+                * we don't yet reconstruct the FB from the hardware state.
+                */
+               if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
+                       disable_crtc_nofb(to_intel_crtc(save_set.crtc));
                /* Try to restore the config */
                if (config->mode_changed &&
                    intel_set_mode(save_set.crtc, save_set.mode,
@@@ -10839,6 -10930,9 +10944,9 @@@ static struct intel_quirk intel_quirks[
  
        /* Acer Aspire 4736Z */
        { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+       /* Acer Aspire 5336 */
+       { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
  };
  
  static void intel_init_quirks(struct drm_device *dev)
@@@ -10869,6 -10963,7 +10977,7 @@@ static void i915_disable_vga(struct drm
        u8 sr1;
        u32 vga_reg = i915_vgacntrl_reg(dev);
  
+       /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
        vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
        outb(SR01, VGA_SR_INDEX);
        sr1 = inb(VGA_SR_DATA);
@@@ -11265,7 -11360,7 +11374,7 @@@ void intel_modeset_setup_hw_state(struc
         */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list,
                            base.head) {
-               if (crtc->active && i915_fastboot) {
+               if (crtc->active && i915.fastboot) {
                        intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
  
                        DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
@@@ -11329,7 -11424,6 +11438,6 @@@ void intel_modeset_gem_init(struct drm_
        intel_setup_overlay(dev);
  
        mutex_lock(&dev->mode_config.mutex);
-       drm_mode_config_reset(dev);
        intel_modeset_setup_hw_state(dev, false);
        mutex_unlock(&dev->mode_config.mutex);
  }
index 57552eb386b0b1b6d735a7fb0f9b770405e5f411,0ef2690537510addd0e52aa44814e5d9ea50f415..bd1df502bc34faaf36effec0665d45d5019f20a8
@@@ -91,18 -91,25 +91,25 @@@ static struct intel_dp *intel_attached_
  }
  
  static void intel_dp_link_down(struct intel_dp *intel_dp);
+ static void edp_panel_vdd_on(struct intel_dp *intel_dp);
+ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
  
  static int
  intel_dp_max_link_bw(struct intel_dp *intel_dp)
  {
        int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
+       struct drm_device *dev = intel_dp->attached_connector->base.dev;
  
        switch (max_link_bw) {
        case DP_LINK_BW_1_62:
        case DP_LINK_BW_2_7:
                break;
        case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
-               max_link_bw = DP_LINK_BW_2_7;
+               if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) &&
+                   intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
+                       max_link_bw = DP_LINK_BW_5_4;
+               else
+                       max_link_bw = DP_LINK_BW_2_7;
                break;
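        /*
         * [editor's note] For reference, the DPCD MAX_LINK_RATE codes
         * compared here are DP_LINK_BW_1_62 = 0x06, DP_LINK_BW_2_7 = 0x0a
         * and DP_LINK_BW_5_4 = 0x14 (1.62/2.7/5.4 Gbps per lane), so HBR2
         * is only honoured on HSW/gen8+ when the sink reports DPCD rev 1.2+.
         */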
        default:
                WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
@@@ -294,7 -301,7 +301,7 @@@ static u32 _pp_stat_reg(struct intel_d
                return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
  }
  
- static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
+ static bool edp_have_panel_power(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
  }
  
- static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
+ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
@@@ -319,7 -326,7 +326,7 @@@ intel_dp_check_edp(struct intel_dp *int
        if (!is_edp(intel_dp))
                return;
  
-       if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
+       if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
                WARN(1, "eDP powered off while attempting aux channel communication.\n");
                DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
                              I915_READ(_pp_stat_reg(intel_dp)),
@@@ -351,31 -358,46 +358,46 @@@ intel_dp_aux_wait_done(struct intel_dp 
        return status;
  }
  
- static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
-                                     int index)
+ static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
  {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
  
-       /* The clock divider is based off the hrawclk,
-        * and would like to run at 2MHz. So, take the
-        * hrawclk value and divide by 2 and use that
-        *
-        * Note that PCH attached eDP panels should use a 125MHz input
-        * clock divider.
+       /*
+        * The clock divider is based off the hrawclk, and would like to run at
+        * 2MHz.  So, take the hrawclk value and divide by 2 and use that
         */
-       if (IS_VALLEYVIEW(dev)) {
-               return index ? 0 : 100;
-       } else if (intel_dig_port->port == PORT_A) {
-               if (index)
-                       return 0;
-               if (HAS_DDI(dev))
-                       return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
-               else if (IS_GEN6(dev) || IS_GEN7(dev))
+       return index ? 0 : intel_hrawclk(dev) / 2;
+ }
+ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+ {
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       if (index)
+               return 0;
+       if (intel_dig_port->port == PORT_A) {
+               if (IS_GEN6(dev) || IS_GEN7(dev))
                        return 200; /* SNB & IVB eDP input clock at 400Mhz */
                else
                        return 225; /* eDP input clock at 450Mhz */
+       } else {
+               return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+       }
+ }
+ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+ {
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       if (intel_dig_port->port == PORT_A) {
+               if (index)
+                       return 0;
+               return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
        } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                /* Workaround for non-ULT HSW */
                switch (index) {
                case 1: return 72;
                default: return 0;
                }
-       } else if (HAS_PCH_SPLIT(dev)) {
+       } else {
                return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
-       } else {
-               return index ? 0 :intel_hrawclk(dev) / 2;
        }
  }
  
+ static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+ {
+       return index ? 0 : 100;
+ }
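  /*
   * [editor's note] The get_aux_clock_divider() hooks share a small
   * protocol: they are called with index 0, 1, 2, ... and return 0 once
   * they run out of divider candidates, which is what terminates the
   *
   *   while ((aux_clock_divider =
   *                   intel_dp->get_aux_clock_divider(intel_dp, clock++)))
   *
   * retry loop in intel_dp_aux_ch() below. VLV gets by with one fixed
   * divider; only the non-ULT HSW workaround needs a second candidate.
   */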
+ static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
+                                     bool has_aux_irq,
+                                     int send_bytes,
+                                     uint32_t aux_clock_divider)
+ {
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       uint32_t precharge, timeout;
+       if (IS_GEN6(dev))
+               precharge = 3;
+       else
+               precharge = 5;
+       if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
+               timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
+       else
+               timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
+       return DP_AUX_CH_CTL_SEND_BUSY |
+              DP_AUX_CH_CTL_DONE |
+              (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+              DP_AUX_CH_CTL_TIME_OUT_ERROR |
+              timeout |
+              DP_AUX_CH_CTL_RECEIVE_ERROR |
+              (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+              (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+              (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
+              (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
+ }
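  /*
   * [editor's note] Packing the AUX_CH_CTL word in a per-platform hook
   * keeps the precharge/timeout policy (3 vs 5 precharge clocks, the BDW
   * 600us eDP timeout workaround) out of the transfer loop below, which
   * now just writes whatever send_ctl the hook returns.
   */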
  static int
  intel_dp_aux_ch(struct intel_dp *intel_dp,
                uint8_t *send, int send_bytes,
        uint32_t aux_clock_divider;
        int i, ret, recv_bytes;
        uint32_t status;
-       int try, precharge, clock = 0;
+       int try, clock = 0;
 -      bool has_aux_irq = true;
 +      bool has_aux_irq = HAS_AUX_IRQ(dev);
-       uint32_t timeout;
  
        /* dp aux is extremely sensitive to irq latency, hence request the
         * lowest possible wakeup latency and so prevent the cpu from going into
  
        intel_dp_check_edp(intel_dp);
  
-       if (IS_GEN6(dev))
-               precharge = 3;
-       else
-               precharge = 5;
-       if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL)
-               timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
-       else
-               timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
        intel_aux_display_runtime_get(dev_priv);
  
        /* Try to wait for any previous AUX channel activity */
                goto out;
        }
  
-       while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
+       while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
+               u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
+                                                         has_aux_irq,
+                                                         send_bytes,
+                                                         aux_clock_divider);
                /* Must try at least 3 times according to DP spec */
                for (try = 0; try < 5; try++) {
                        /* Load the send data into the aux channel data registers */
                                           pack_aux(send + i, send_bytes - i));
  
                        /* Send the command and wait for it to complete */
-                       I915_WRITE(ch_ctl,
-                                  DP_AUX_CH_CTL_SEND_BUSY |
-                                  (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
-                                  timeout |
-                                  (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-                                  (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
-                                  (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
-                                  DP_AUX_CH_CTL_DONE |
-                                  DP_AUX_CH_CTL_TIME_OUT_ERROR |
-                                  DP_AUX_CH_CTL_RECEIVE_ERROR);
+                       I915_WRITE(ch_ctl, send_ctl);
  
                        status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
  
@@@ -537,7 -577,6 +577,7 @@@ intel_dp_aux_native_write(struct intel_
        uint8_t msg[20];
        int msg_bytes;
        uint8_t ack;
 +      int retry;
  
        if (WARN_ON(send_bytes > 16))
                return -E2BIG;
        msg[3] = send_bytes - 1;
        memcpy(&msg[4], send, send_bytes);
        msg_bytes = send_bytes + 4;
 -      for (;;) {
 +      for (retry = 0; retry < 7; retry++) {
                ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
                if (ret < 0)
                        return ret;
                ack >>= 4;
                if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
 -                      break;
 +                      return send_bytes;
                else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
 -                      udelay(100);
 +                      usleep_range(400, 500);
                else
                        return -EIO;
        }
 -      return send_bytes;
 +
 +      DRM_ERROR("too many retries, giving up\n");
 +      return -EIO;
  }
  
  /* Write a single byte to the aux channel in native mode */
@@@ -585,7 -622,6 +625,7 @@@ intel_dp_aux_native_read(struct intel_d
        int reply_bytes;
        uint8_t ack;
        int ret;
 +      int retry;
  
        if (WARN_ON(recv_bytes > 19))
                return -E2BIG;
        msg_bytes = 4;
        reply_bytes = recv_bytes + 1;
  
 -      for (;;) {
 +      for (retry = 0; retry < 7; retry++) {
                ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
                                      reply, reply_bytes);
                if (ret == 0)
                        return ret - 1;
                }
                else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
 -                      udelay(100);
 +                      usleep_range(400, 500);
                else
                        return -EIO;
        }
 +
 +      DRM_ERROR("too many retries, giving up\n");
 +      return -EIO;
  }
  
  static int
@@@ -637,7 -670,7 +677,7 @@@ intel_dp_i2c_aux_ch(struct i2c_adapter 
        int reply_bytes;
        int ret;
  
-       ironlake_edp_panel_vdd_on(intel_dp);
+       edp_panel_vdd_on(intel_dp);
        intel_dp_check_edp(intel_dp);
        /* Set up the command byte */
        if (mode & MODE_I2C_READ)
        ret = -EREMOTEIO;
  
  out:
-       ironlake_edp_panel_vdd_off(intel_dp, false);
+       edp_panel_vdd_off(intel_dp, false);
        return ret;
  }
  
@@@ -812,9 -845,10 +852,10 @@@ intel_dp_compute_config(struct intel_en
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        int lane_count, clock;
        int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
-       int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+       /* Conveniently, the link BW constants become indices with a shift... */
+       int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
        int bpp, mode_rate;
-       static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+       static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
        int link_avail, link_clock;
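        /*
         * [editor's note] Worked example for the shift above: the link BW
         * codes 0x06, 0x0a and 0x14 become 0, 1 and 2 after >> 3, exactly
         * the indices of DP_LINK_BW_1_62, DP_LINK_BW_2_7 and DP_LINK_BW_5_4
         * in bws[].
         */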
  
        if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
@@@ -1015,16 -1049,16 +1056,16 @@@ static void intel_dp_mode_set(struct in
                ironlake_set_pll_cpu_edp(intel_dp);
  }
  
- #define IDLE_ON_MASK          (PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
- #define IDLE_ON_VALUE         (PP_ON | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
+ #define IDLE_ON_MASK          (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
+ #define IDLE_ON_VALUE         (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
  
- #define IDLE_OFF_MASK         (PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
- #define IDLE_OFF_VALUE                (0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
+ #define IDLE_OFF_MASK         (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
+ #define IDLE_OFF_VALUE                (0     | PP_SEQUENCE_NONE | 0                     | 0)
  
- #define IDLE_CYCLE_MASK               (PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
- #define IDLE_CYCLE_VALUE      (0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
+ #define IDLE_CYCLE_MASK               (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+ #define IDLE_CYCLE_VALUE      (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
  
- static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
+ static void wait_panel_status(struct intel_dp *intel_dp,
                                       u32 mask,
                                       u32 value)
  {
        DRM_DEBUG_KMS("Wait complete\n");
  }
  
- static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
+ static void wait_panel_on(struct intel_dp *intel_dp)
  {
        DRM_DEBUG_KMS("Wait for panel power on\n");
-       ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+       wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
  }
  
- static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
+ static void wait_panel_off(struct intel_dp *intel_dp)
  {
        DRM_DEBUG_KMS("Wait for panel power off time\n");
-       ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+       wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
  }
  
- static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
+ static void wait_panel_power_cycle(struct intel_dp *intel_dp)
  {
        DRM_DEBUG_KMS("Wait for panel power cycle\n");
-       ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+       /* When we disable the VDD override bit last we have to do the manual
+        * wait. */
+       wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
+                                      intel_dp->panel_power_cycle_delay);
+       wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+ }
+ static void wait_backlight_on(struct intel_dp *intel_dp)
+ {
+       wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
+                                      intel_dp->backlight_on_delay);
  }
  
+ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
+ {
+       wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
+                                      intel_dp->backlight_off_delay);
+ }
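  /*
   * [editor's note] These helpers replace fixed msleep() calls on the
   * panel delays: jiffies stamps are taken whenever the panel or backlight
   * changes state, and wait_remaining_ms_from_jiffies() sleeps only for
   * whatever part of the configured delay has not already elapsed. A
   * plausible sketch of that helper (it lives outside this excerpt):
   *
   *   static void wait_remaining_ms_from_jiffies(unsigned long timestamp,
   *                                              int to_wait_ms)
   *   {
   *           unsigned long target = timestamp + msecs_to_jiffies(to_wait_ms);
   *
   *           if (time_before(jiffies, target))
   *                   msleep(jiffies_to_msecs(target - jiffies));
   *   }
   */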
  
  /* Read the current pp_control value, unlocking the register if it
   * is locked
@@@ -1084,7 -1135,7 +1142,7 @@@ static  u32 ironlake_get_pp_control(str
        return control;
  }
  
- void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+ static void edp_panel_vdd_on(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        intel_dp->want_panel_vdd = true;
  
-       if (ironlake_edp_have_panel_vdd(intel_dp))
+       if (edp_have_panel_vdd(intel_dp))
                return;
  
        intel_runtime_pm_get(dev_priv);
  
        DRM_DEBUG_KMS("Turning eDP VDD on\n");
  
-       if (!ironlake_edp_have_panel_power(intel_dp))
-               ironlake_wait_panel_power_cycle(intel_dp);
+       if (!edp_have_panel_power(intel_dp))
+               wait_panel_power_cycle(intel_dp);
  
        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_FORCE_VDD;
        /*
         * If the panel wasn't on, delay before accessing aux channel
         */
-       if (!ironlake_edp_have_panel_power(intel_dp)) {
+       if (!edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP was not running\n");
                msleep(intel_dp->panel_power_up_delay);
        }
  }
  
- static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
+ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
  
-       if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
+       if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
                DRM_DEBUG_KMS("Turning eDP VDD off\n");
  
                pp = ironlake_get_pp_control(intel_dp);
                I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
  
                if ((pp & POWER_TARGET_ON) == 0)
-                       msleep(intel_dp->panel_power_cycle_delay);
+                       intel_dp->last_power_cycle = jiffies;
  
                intel_runtime_pm_put(dev_priv);
        }
  }
  
- static void ironlake_panel_vdd_work(struct work_struct *__work)
+ static void edp_panel_vdd_work(struct work_struct *__work)
  {
        struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
                                                 struct intel_dp, panel_vdd_work);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
  
        mutex_lock(&dev->mode_config.mutex);
-       ironlake_panel_vdd_off_sync(intel_dp);
+       edp_panel_vdd_off_sync(intel_dp);
        mutex_unlock(&dev->mode_config.mutex);
  }
  
- void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
  {
        if (!is_edp(intel_dp))
                return;
        intel_dp->want_panel_vdd = false;
  
        if (sync) {
-               ironlake_panel_vdd_off_sync(intel_dp);
+               edp_panel_vdd_off_sync(intel_dp);
        } else {
                /*
                 * Queue the timer to fire a long
        }
  }
  
- void ironlake_edp_panel_on(struct intel_dp *intel_dp)
+ void intel_edp_panel_on(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        DRM_DEBUG_KMS("Turn eDP power on\n");
  
-       if (ironlake_edp_have_panel_power(intel_dp)) {
+       if (edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP power already on\n");
                return;
        }
  
-       ironlake_wait_panel_power_cycle(intel_dp);
+       wait_panel_power_cycle(intel_dp);
  
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
  
-       ironlake_wait_panel_on(intel_dp);
+       wait_panel_on(intel_dp);
+       intel_dp->last_power_on = jiffies;
  
        if (IS_GEN5(dev)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
        }
  }
  
- void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+ void intel_edp_panel_off(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        DRM_DEBUG_KMS("Turn eDP power off\n");
  
+       edp_wait_backlight_off(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
  
-       ironlake_wait_panel_off(intel_dp);
+       intel_dp->last_power_cycle = jiffies;
+       wait_panel_off(intel_dp);
  }
  
- void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
+ void intel_edp_backlight_on(struct intel_dp *intel_dp)
  {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
         * link.  So delay a bit to make sure the image is solid before
         * allowing it to appear.
         */
-       msleep(intel_dp->backlight_on_delay);
+       wait_backlight_on(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_BLC_ENABLE;
  
        intel_panel_enable_backlight(intel_dp->attached_connector);
  }
  
- void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
+ void intel_edp_backlight_off(struct intel_dp *intel_dp)
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
-       msleep(intel_dp->backlight_off_delay);
+       intel_dp->last_backlight_off = jiffies;
  }
  
  static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
@@@ -1597,10 -1652,12 +1659,12 @@@ static void intel_edp_psr_enable_sink(s
  {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0);
+       uint32_t aux_clock_divider;
        int precharge = 0x3;
        int msg_size = 5;       /* Header(4) + Message(1) */
  
+       aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
        /* Enable PSR in sink */
        if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
                intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
@@@ -1668,7 -1725,7 +1732,7 @@@ static bool intel_edp_psr_match_conditi
                return false;
        }
  
-       if (!i915_enable_psr) {
+       if (!i915.enable_psr) {
                DRM_DEBUG_KMS("PSR disable by flag\n");
                return false;
        }
@@@ -1784,9 -1841,9 +1848,9 @@@ static void intel_disable_dp(struct int
  
        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
-       ironlake_edp_backlight_off(intel_dp);
+       intel_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
-       ironlake_edp_panel_off(intel_dp);
+       intel_edp_panel_off(intel_dp);
  
        /* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
        if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
@@@ -1816,11 -1873,11 +1880,11 @@@ static void intel_enable_dp(struct inte
        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;
  
-       ironlake_edp_panel_vdd_on(intel_dp);
+       edp_panel_vdd_on(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
-       ironlake_edp_panel_on(intel_dp);
-       ironlake_edp_panel_vdd_off(intel_dp, true);
+       intel_edp_panel_on(intel_dp);
+       edp_panel_vdd_off(intel_dp, true);
        intel_dp_complete_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);
  }
@@@ -1830,14 -1887,14 +1894,14 @@@ static void g4x_enable_dp(struct intel_
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  
        intel_enable_dp(encoder);
-       ironlake_edp_backlight_on(intel_dp);
+       intel_edp_backlight_on(intel_dp);
  }
  
  static void vlv_enable_dp(struct intel_encoder *encoder)
  {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
  
-       ironlake_edp_backlight_on(intel_dp);
+       intel_edp_backlight_on(intel_dp);
  }
  
  static void g4x_pre_enable_dp(struct intel_encoder *encoder)
@@@ -1876,12 -1933,10 +1940,12 @@@ static void vlv_pre_enable_dp(struct in
  
        mutex_unlock(&dev_priv->dpio_lock);
  
 -      /* init power sequencer on this pipe and port */
 -      intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
 -      intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
 -                                                    &power_seq);
 +      if (is_edp(intel_dp)) {
 +              /* init power sequencer on this pipe and port */
 +              intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
 +              intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
 +                                                            &power_seq);
 +      }
  
        intel_enable_dp(encoder);
  
@@@ -2630,10 -2685,15 +2694,15 @@@ intel_dp_complete_link_train(struct int
        bool channel_eq = false;
        int tries, cr_tries;
        uint32_t DP = intel_dp->DP;
+       uint32_t training_pattern = DP_TRAINING_PATTERN_2;
+       /* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
+       if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
+               training_pattern = DP_TRAINING_PATTERN_3;
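        /*
         * [editor's note] TPS3 is the DP 1.2 training pattern: mandatory
         * for HBR2 (5.4 Gbps) links and optional below that, which matches
         * intel_dp->use_tps3 being set from DP_TPS3_SUPPORTED in the DPCD
         * parsing hunk further down.
         */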
  
        /* channel equalization */
        if (!intel_dp_set_link_train(intel_dp, &DP,
-                                    DP_TRAINING_PATTERN_2 |
+                                    training_pattern |
                                     DP_LINK_SCRAMBLING_DISABLE)) {
                DRM_ERROR("failed to start channel equalization\n");
                return;
                if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
                        intel_dp_start_link_train(intel_dp);
                        intel_dp_set_link_train(intel_dp, &DP,
-                                               DP_TRAINING_PATTERN_2 |
+                                               training_pattern |
                                                DP_LINK_SCRAMBLING_DISABLE);
                        cr_tries++;
                        continue;
                        intel_dp_link_down(intel_dp);
                        intel_dp_start_link_train(intel_dp);
                        intel_dp_set_link_train(intel_dp, &DP,
-                                               DP_TRAINING_PATTERN_2 |
+                                               training_pattern |
                                                DP_LINK_SCRAMBLING_DISABLE);
                        tries = 0;
                        cr_tries++;
@@@ -2818,6 -2878,14 +2887,14 @@@ intel_dp_get_dpcd(struct intel_dp *inte
                }
        }
  
+       /* Training Pattern 3 support */
+       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
+           intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
+               intel_dp->use_tps3 = true;
+               DRM_DEBUG_KMS("Displayport TPS3 supported\n");
+       } else {
+               intel_dp->use_tps3 = false;
+       }
        if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
              DP_DWN_STRM_PORT_PRESENT))
                return true; /* native DP sink */
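For illustration: the capability bit cached above pairs with the training_pattern selection in the intel_dp_complete_link_train() hunk earlier. A minimal sketch of the combined rule, factored into a helper (the helper name is hypothetical, not part of this patch):

static u32 intel_dp_eq_training_pattern(struct intel_dp *intel_dp)
{
        /* HBR2 (5.4 GHz) mandates TPS3; otherwise use it only when
         * the sink advertised DP_TPS3_SUPPORTED in its DPCD. */
        if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
                return DP_TRAINING_PATTERN_3;

        return DP_TRAINING_PATTERN_2;
}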
@@@ -2841,7 -2909,7 +2918,7 @@@ intel_dp_probe_oui(struct intel_dp *int
        if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
                return;
  
-       ironlake_edp_panel_vdd_on(intel_dp);
+       edp_panel_vdd_on(intel_dp);
  
        if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
                DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
                DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
                              buf[0], buf[1], buf[2]);
  
-       ironlake_edp_panel_vdd_off(intel_dp, false);
+       edp_panel_vdd_off(intel_dp, false);
+ }
+ 
+ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
+ {
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct intel_crtc *intel_crtc =
+               to_intel_crtc(intel_dig_port->base.base.crtc);
+       u8 buf[1];
+ 
+       if (!intel_dp_aux_native_read(intel_dp, DP_TEST_SINK_MISC, buf, 1))
+               return -EAGAIN;
+ 
+       if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
+               return -ENOTTY;
+ 
+       if (!intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK,
+                                        DP_TEST_SINK_START))
+               return -EAGAIN;
+ 
+       /* Wait 2 vblanks to be sure we will have the correct CRC value */
+       intel_wait_for_vblank(dev, intel_crtc->pipe);
+       intel_wait_for_vblank(dev, intel_crtc->pipe);
+ 
+       if (!intel_dp_aux_native_read(intel_dp, DP_TEST_CRC_R_CR, crc, 6))
+               return -EAGAIN;
+ 
+       intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK, 0);
+       return 0;
  }
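A hypothetical consumer of the new function, e.g. from a test or debugfs path (the wrapper name is an assumption; the error conventions are those of intel_dp_sink_crc() above):

/* Fetch and log the six CRC bytes the sink computed for the last
 * transmitted frame; -ENOTTY means the sink lacks CRC support,
 * -EAGAIN an AUX failure, both propagated from intel_dp_sink_crc(). */
static int intel_dp_log_sink_crc(struct intel_dp *intel_dp)
{
        u8 crc[6];
        int ret;

        ret = intel_dp_sink_crc(intel_dp, crc);
        if (ret)
                return ret;

        DRM_DEBUG_KMS("sink CRC R/Cr %02x%02x G/Y %02x%02x B/Cb %02x%02x\n",
                      crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
        return 0;
}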
  
  static bool
@@@ -3295,7 -3392,7 +3401,7 @@@ void intel_dp_encoder_destroy(struct dr
        if (is_edp(intel_dp)) {
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                mutex_lock(&dev->mode_config.mutex);
-               ironlake_panel_vdd_off_sync(intel_dp);
+               edp_panel_vdd_off_sync(intel_dp);
                mutex_unlock(&dev->mode_config.mutex);
        }
        kfree(intel_dig_port);
@@@ -3394,6 -3491,13 +3500,13 @@@ intel_dp_add_properties(struct intel_d
        }
  }
  
+ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
+ {
+       intel_dp->last_power_cycle = jiffies;
+       intel_dp->last_power_on = jiffies;
+       intel_dp->last_backlight_off = jiffies;
+ }
+ 
  static void
  intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp,
@@@ -3516,10 -3620,17 +3629,17 @@@ intel_dp_init_panel_power_sequencer_reg
                pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
        }
  
-       /* And finally store the new values in the power sequencer. */
+       /*
+        * And finally store the new values in the power sequencer. The
+        * backlight delays are set to 1 because we do manual waits on them. For
+        * T8, even BSpec recommends doing it. For T9, if we don't do this,
+        * we'll end up waiting for the backlight off delay twice: once when we
+        * do the manual sleep, and once when we disable the panel and wait for
+        * the PP_STATUS bit to become zero.
+        */
        pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
-               (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
-       pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+               (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
+       pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
                 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
        /* Compute the divisor for the pp clock, simply match the Bspec
         * formula. */
  }
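A worked example of the packing above, with made-up numbers: a 50 ms T1+T3 is 500 ticks of 100 us, while both backlight fields are pinned to a single tick because the driver now sleeps T8/T9 itself:

/* Illustration only; 500 is a made-up T1+T3 value (50 ms in 100 us
 * units). The 1s keep the hardware from waiting T8/T9 a second time. */
u32 pp_on  = (500 << PANEL_POWER_UP_DELAY_SHIFT) |
             (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
u32 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
             (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);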
  
  static bool intel_edp_init_connector(struct intel_dp *intel_dp,
-                                    struct intel_connector *intel_connector)
+                                    struct intel_connector *intel_connector,
+                                    struct edp_power_seq *power_seq)
  {
        struct drm_connector *connector = &intel_connector->base;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *fixed_mode = NULL;
-       struct edp_power_seq power_seq = { 0 };
        bool has_dpcd;
        struct drm_display_mode *scan;
        struct edid *edid;
        if (!is_edp(intel_dp))
                return true;
  
-       intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
        /* Cache DPCD and EDID for edp. */
-       ironlake_edp_panel_vdd_on(intel_dp);
+       edp_panel_vdd_on(intel_dp);
        has_dpcd = intel_dp_get_dpcd(intel_dp);
-       ironlake_edp_panel_vdd_off(intel_dp, false);
+       edp_panel_vdd_off(intel_dp, false);
  
        if (has_dpcd) {
                if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
        }
  
        /* We now know it's not a ghost, init power sequence regs. */
-       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
-                                                     &power_seq);
+       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
  
        edid = drm_get_edid(connector, &intel_dp->adapter);
        if (edid) {
@@@ -3638,9 -3746,22 +3755,22 @@@ intel_dp_init_connector(struct intel_di
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;
+       struct edp_power_seq power_seq = { 0 };
        const char *name = NULL;
        int type, error;
  
+       /* intel_dp vfuncs */
+       if (IS_VALLEYVIEW(dev))
+               intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
+       else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
+       else if (HAS_PCH_SPLIT(dev))
+               intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
+       else
+               intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
+       intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
+ 
        /* Preserve the current hw state. */
        intel_dp->DP = I915_READ(intel_dp->output_reg);
        intel_dp->attached_connector = intel_connector;
        connector->doublescan_allowed = 0;
  
        INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
-                         ironlake_panel_vdd_work);
+                         edp_panel_vdd_work);
  
        intel_connector_attach_encoder(intel_connector, intel_encoder);
        drm_sysfs_connector_add(connector);
                BUG();
        }
  
+       if (is_edp(intel_dp)) {
+               intel_dp_init_panel_power_timestamps(intel_dp);
+               intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+       }
+ 
        error = intel_dp_i2c_init(intel_dp, intel_connector, name);
        WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
             error, port_name(port));
  
        intel_dp->psr_setup_done = false;
  
-       if (!intel_edp_init_connector(intel_dp, intel_connector)) {
+       if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
                i2c_del_adapter(&intel_dp->adapter);
                if (is_edp(intel_dp)) {
                        cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                        mutex_lock(&dev->mode_config.mutex);
-                       ironlake_panel_vdd_off_sync(intel_dp);
+                       edp_panel_vdd_off_sync(intel_dp);
                        mutex_unlock(&dev->mode_config.mutex);
                }
                drm_sysfs_connector_remove(connector);
index 31b36c5ac8941e844cd9f995b0c4e575219fc8ea,ba686d75ff32bb289c87583d4ec4b78c056ce59a..8c1c0bc3e630868ca98980bab93a6f6d75fac5cf
@@@ -549,7 -549,7 +549,7 @@@ init_pipe_control(struct intel_ring_buf
        return 0;
  
  err_unpin:
-       i915_gem_object_unpin(ring->scratch.obj);
+       i915_gem_object_ggtt_unpin(ring->scratch.obj);
  err_unref:
        drm_gem_object_unreference(&ring->scratch.obj->base);
  err:
@@@ -625,7 -625,7 +625,7 @@@ static void render_ring_cleanup(struct 
  
        if (INTEL_INFO(dev)->gen >= 5) {
                kunmap(sg_page(ring->scratch.obj->pages->sgl));
-               i915_gem_object_unpin(ring->scratch.obj);
+               i915_gem_object_ggtt_unpin(ring->scratch.obj);
        }
  
        drm_gem_object_unreference(&ring->scratch.obj->base);
@@@ -1253,7 -1253,7 +1253,7 @@@ static void cleanup_status_page(struct 
                return;
  
        kunmap(sg_page(obj->pages->sgl));
-       i915_gem_object_unpin(obj);
+       i915_gem_object_ggtt_unpin(obj);
        drm_gem_object_unreference(&obj->base);
        ring->status_page.obj = NULL;
  }
@@@ -1293,7 -1293,7 +1293,7 @@@ static int init_status_page(struct inte
        return 0;
  
  err_unpin:
-       i915_gem_object_unpin(obj);
+       i915_gem_object_ggtt_unpin(obj);
  err_unref:
        drm_gem_object_unreference(&obj->base);
  err:
@@@ -1390,7 -1390,7 +1390,7 @@@ static int intel_init_ring_buffer(struc
  err_unmap:
        iounmap(ring->virtual_start);
  err_unpin:
-       i915_gem_object_unpin(obj);
+       i915_gem_object_ggtt_unpin(obj);
  err_unref:
        drm_gem_object_unreference(&obj->base);
        ring->obj = NULL;
@@@ -1418,7 -1418,7 +1418,7 @@@ void intel_cleanup_ring_buffer(struct i
  
        iounmap(ring->virtual_start);
  
-       i915_gem_object_unpin(ring->obj);
+       i915_gem_object_ggtt_unpin(ring->obj);
        drm_gem_object_unreference(&ring->obj->base);
        ring->obj = NULL;
        ring->preallocated_lazy_request = NULL;
        cleanup_status_page(ring);
  }
  
- static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
- {
-       int ret;
-       ret = i915_wait_seqno(ring, seqno);
-       if (!ret)
-               i915_gem_retire_requests_ring(ring);
-       return ret;
- }
  static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
  {
        struct drm_i915_gem_request *request;
-       u32 seqno = 0;
+       u32 seqno = 0, tail;
        int ret;
  
-       i915_gem_retire_requests_ring(ring);
        if (ring->last_retired_head != -1) {
                ring->head = ring->last_retired_head;
                ring->last_retired_head = -1;
                ring->space = ring_space(ring);
                if (ring->space >= n)
                        return 0;
                        space += ring->size;
                if (space >= n) {
                        seqno = request->seqno;
+                       tail = request->tail;
                        break;
                }
  
        if (seqno == 0)
                return -ENOSPC;
  
-       ret = intel_ring_wait_seqno(ring, seqno);
+       ret = i915_wait_seqno(ring, seqno);
        if (ret)
                return ret;
  
-       if (WARN_ON(ring->last_retired_head == -1))
-               return -ENOSPC;
-       ring->head = ring->last_retired_head;
-       ring->last_retired_head = -1;
+       ring->head = tail;
        ring->space = ring_space(ring);
        if (WARN_ON(ring->space < n))
                return -ENOSPC;
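The rework above stops round-tripping through last_retired_head: it records the tail of the request whose completion frees enough space, waits for that seqno, and jumps ring->head straight to the recorded tail. For intuition, a sketch of the free-space arithmetic this relies on, mirroring ring_space() (I915_RING_FREE_SPACE is the slack constant from intel_ringbuffer.h):

/* Free bytes between tail and head, modulo the ring size, minus the
 * mandatory slack that keeps head == tail unambiguous. Sketch only. */
static int ring_space_between(u32 head, u32 tail, u32 size)
{
        int space = head - (tail + I915_RING_FREE_SPACE);

        if (space < 0)
                space += size;

        return space;
}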
@@@ -1653,27 -1638,6 +1638,27 @@@ int intel_ring_begin(struct intel_ring_
        return 0;
  }
  
 +/* Align the ring tail to a cacheline boundary */
 +int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
 +{
 +      int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
 +      int ret;
 +
 +      if (num_dwords == 0)
 +              return 0;
 +
 +      ret = intel_ring_begin(ring, num_dwords);
 +      if (ret)
 +              return ret;
 +
 +      while (num_dwords--)
 +              intel_ring_emit(ring, MI_NOOP);
 +
 +      intel_ring_advance(ring);
 +
 +      return 0;
 +}
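A hypothetical caller of the new helper: pad to a 64-byte boundary before emitting a command sequence that must not straddle a cacheline (emit_aligned_lri() is an illustration, not part of this patch):

static int emit_aligned_lri(struct intel_ring_buffer *ring, u32 reg, u32 val)
{
        int ret;

        /* Fill the rest of the current cacheline with MI_NOOPs. */
        ret = intel_ring_cacheline_align(ring);
        if (ret)
                return ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, reg);
        intel_ring_emit(ring, val);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}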
 +
  void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
  {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
index 0b243ce337147d51f7e9cb4b09d774f4cc33e856,38c757e136dc988c96bd800c8ee638aba65b20ba..08b91c6ac70a125d1a1b214a7189c8c6af60b5ae
@@@ -41,6 -41,8 +41,8 @@@ enum intel_ring_hangcheck_action 
        HANGCHECK_HUNG,
  };
  
+ #define HANGCHECK_SCORE_RING_HUNG 31
+ 
  struct intel_ring_hangcheck {
        bool deadlock;
        u32 seqno;
@@@ -233,7 -235,6 +235,7 @@@ intel_write_status_page(struct intel_ri
  void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
  
  int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
 +int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
  static inline void intel_ring_emit(struct intel_ring_buffer *ring,
                                   u32 data)
  {
index b7488c9849ad7bb05b50c4242b43bc65f50a2839,73c3d1f2b20d58bdfc6c2d08558829a66a647430..42947566e755c00a3cc8e14c9cd34f12ae92a136
  
  #define DP_TEST_PATTERN                           0x221
  
+ #define DP_TEST_CRC_R_CR                  0x240
+ #define DP_TEST_CRC_G_Y                           0x242
+ #define DP_TEST_CRC_B_CB                  0x244
+ #define DP_TEST_SINK_MISC                 0x246
+ #define DP_TEST_CRC_SUPPORTED             (1 << 5)
  #define DP_TEST_RESPONSE                  0x260
  # define DP_TEST_ACK                      (1 << 0)
  # define DP_TEST_NAK                      (1 << 1)
  # define DP_TEST_EDID_CHECKSUM_WRITE      (1 << 2)
  
+ #define DP_TEST_SINK                      0x270
+ #define DP_TEST_SINK_START        (1 << 0)
+ 
  #define DP_SOURCE_OUI                     0x300
  #define DP_SINK_OUI                       0x400
  #define DP_BRANCH_OUI                     0x500
  #define DP_SET_POWER                        0x600
  # define DP_SET_POWER_D0                    0x1
  # define DP_SET_POWER_D3                    0x2
 +# define DP_SET_POWER_MASK                  0x3
  
  #define DP_PSR_ERROR_STATUS                 0x2006  /* XXX 1.2? */
  # define DP_PSR_LINK_CRC_ERROR              (1 << 0)
@@@ -399,114 -408,4 +409,114 @@@ drm_dp_enhanced_frame_cap(const u8 dpcd
                (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
  }
  
 +/*
 + * DisplayPort AUX channel
 + */
 +
 +/**
 + * struct drm_dp_aux_msg - DisplayPort AUX channel transaction
 + * @address: address of the (first) register to access
 + * @request: contains the type of transaction (see DP_AUX_* macros)
 + * @reply: upon completion, contains the reply type of the transaction
 + * @buffer: pointer to a transmission or reception buffer
 + * @size: size of @buffer
 + */
 +struct drm_dp_aux_msg {
 +      unsigned int address;
 +      u8 request;
 +      u8 reply;
 +      void *buffer;
 +      size_t size;
 +};
 +
 +/**
 + * struct drm_dp_aux - DisplayPort AUX channel
 + * @ddc: I2C adapter that can be used for I2C-over-AUX communication
 + * @dev: pointer to struct device that is the parent for this AUX channel
 + * @transfer: transfers a message representing a single AUX transaction
 + *
 + * The .dev field should be set to a pointer to the device that implements
 + * the AUX channel.
 + *
 + * Drivers provide a hardware-specific implementation of how transactions
 + * are executed via the .transfer() function. A pointer to a drm_dp_aux_msg
 + * structure describing the transaction is passed into this function. Upon
 + * success, the implementation should return the number of payload bytes
 + * that were transferred, or a negative error-code on failure. Helpers
 + * propagate errors from the .transfer() function, with the exception of
 + * the -EBUSY error, which causes a transaction to be retried. On a short
 + * transfer, helpers will return -EPROTO to make it simpler to check for
 + * failure.
 + *
 + * An AUX channel can also be used to transport I2C messages to a sink. A
 + * typical application of that is to access an EDID that's present in the
 + * sink device. The .transfer() function can also be used to execute such
 + * transactions. The drm_dp_aux_register_i2c_bus() function registers an
 + * I2C adapter that can be passed to drm_probe_ddc(). Upon removal, drivers
 + * should call drm_dp_aux_unregister_i2c_bus() to remove the I2C adapter.
 + */
 +struct drm_dp_aux {
 +      struct i2c_adapter ddc;
 +      struct device *dev;
 +
 +      ssize_t (*transfer)(struct drm_dp_aux *aux,
 +                          struct drm_dp_aux_msg *msg);
 +};
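To make the .transfer() contract concrete, a minimal driver-side sketch; everything prefixed mydrv_ is hypothetical, only struct drm_dp_aux and struct drm_dp_aux_msg come from this header:

struct mydrv_device {
        struct drm_dp_aux aux;
        /* ... hardware state ... */
};

static ssize_t mydrv_aux_transfer(struct drm_dp_aux *aux,
                                  struct drm_dp_aux_msg *msg)
{
        struct mydrv_device *mdev =
                container_of(aux, struct mydrv_device, aux);

        /* Hand request type, address and payload to the (assumed)
         * hardware engine; on success it returns the number of payload
         * bytes moved and fills in the sink's reply code. Returning
         * -EBUSY would make the helpers retry the transaction. */
        return mydrv_hw_aux_xfer(mdev, msg->request, msg->address,
                                 msg->buffer, msg->size, &msg->reply);
}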
 +
 +ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
 +                       void *buffer, size_t size);
 +ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
 +                        void *buffer, size_t size);
 +
 +/**
 + * drm_dp_dpcd_readb() - read a single byte from the DPCD
 + * @aux: DisplayPort AUX channel
 + * @offset: address of the register to read
 + * @valuep: location where the value of the register will be stored
 + *
 + * Returns the number of bytes transferred (1) on success, or a negative
 + * error code on failure.
 + */
 +static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
 +                                      unsigned int offset, u8 *valuep)
 +{
 +      return drm_dp_dpcd_read(aux, offset, valuep, 1);
 +}
 +
 +/**
 + * drm_dp_dpcd_writeb() - write a single byte to the DPCD
 + * @aux: DisplayPort AUX channel
 + * @offset: address of the register to write
 + * @value: value to write to the register
 + *
 + * Returns the number of bytes transferred (1) on success, or a negative
 + * error code on failure.
 + */
 +static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
 +                                       unsigned int offset, u8 value)
 +{
 +      return drm_dp_dpcd_write(aux, offset, &value, 1);
 +}
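For example, a sink power-up path might combine the two byte helpers like this (sketch only; the function name is hypothetical):

static int mydrv_sink_power_up(struct drm_dp_aux *aux)
{
        ssize_t err;
        u8 rev;

        err = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &rev);
        if (err < 0)
                return err;

        /* DPCD 1.0 sinks do not implement DP_SET_POWER. */
        if (rev < 0x11)
                return 0;

        err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, DP_SET_POWER_D0);
        return err < 0 ? err : 0;
}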
 +
 +int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
 +                               u8 status[DP_LINK_STATUS_SIZE]);
 +
 +/*
 + * DisplayPort link
 + */
 +#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
 +
 +struct drm_dp_link {
 +      unsigned char revision;
 +      unsigned int rate;
 +      unsigned int num_lanes;
 +      unsigned long capabilities;
 +};
 +
 +int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
 +int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
 +int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
 +
 +int drm_dp_aux_register_i2c_bus(struct drm_dp_aux *aux);
 +void drm_dp_aux_unregister_i2c_bus(struct drm_dp_aux *aux);
 +
  #endif /* _DRM_DP_HELPER_H_ */