git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - drivers/gpu/drm/i915/i915_drv.h
drm/i915: Rearrange i915_wait_request() accounting with callers
[karo-tx-linux.git] / drivers / gpu / drm / i915 / i915_drv.h
index 8b9ee4e390c0a1fac6d6449c2ceb2f4f51ca3364..cf4b2427aff31288ee692dc2d8336cd1ff1d6590 100644 (file)
@@ -70,7 +70,8 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20160919"
+#define DRIVER_DATE            "20161024"
+#define DRIVER_TIMESTAMP       1477290335
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -185,6 +186,7 @@ enum plane {
 #define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
 
 enum port {
+       PORT_NONE = -1,
        PORT_A = 0,
        PORT_B,
        PORT_C,
@@ -310,7 +312,7 @@ struct i915_hotplug {
 #define for_each_pipe_masked(__dev_priv, __p, __mask) \
        for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
                for_each_if ((__mask) & (1 << (__p)))
-#define for_each_plane(__dev_priv, __pipe, __p)                                \
+#define for_each_universal_plane(__dev_priv, __pipe, __p)              \
        for ((__p) = 0;                                                 \
             (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
             (__p)++)
@@ -581,13 +583,25 @@ struct intel_uncore_funcs {
                                uint32_t val, bool trace);
 };
 
+struct intel_forcewake_range {
+       u32 start;
+       u32 end;
+
+       enum forcewake_domains domains;
+};
+
 struct intel_uncore {
        spinlock_t lock; /** lock is also taken in irq contexts. */
 
+       const struct intel_forcewake_range *fw_domains_table;
+       unsigned int fw_domains_table_entries;
+
        struct intel_uncore_funcs funcs;
 
        unsigned fifo_count;
+
        enum forcewake_domains fw_domains;
+       enum forcewake_domains fw_domains_active;
 
        struct intel_uncore_forcewake_domain {
                struct drm_i915_private *i915;
@@ -633,54 +647,53 @@ struct intel_csr {
        uint32_t allowed_dc_mask;
 };
 
-#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
-       func(is_mobile) sep \
-       func(is_i85x) sep \
-       func(is_i915g) sep \
-       func(is_i945gm) sep \
-       func(is_g33) sep \
-       func(hws_needs_physical) sep \
-       func(is_g4x) sep \
-       func(is_pineview) sep \
-       func(is_broadwater) sep \
-       func(is_crestline) sep \
-       func(is_ivybridge) sep \
-       func(is_valleyview) sep \
-       func(is_cherryview) sep \
-       func(is_haswell) sep \
-       func(is_broadwell) sep \
-       func(is_skylake) sep \
-       func(is_broxton) sep \
-       func(is_kabylake) sep \
-       func(is_preliminary) sep \
-       func(has_fbc) sep \
-       func(has_psr) sep \
-       func(has_runtime_pm) sep \
-       func(has_csr) sep \
-       func(has_resource_streamer) sep \
-       func(has_rc6) sep \
-       func(has_rc6p) sep \
-       func(has_dp_mst) sep \
-       func(has_gmbus_irq) sep \
-       func(has_hw_contexts) sep \
-       func(has_logical_ring_contexts) sep \
-       func(has_l3_dpf) sep \
-       func(has_gmch_display) sep \
-       func(has_guc) sep \
-       func(has_pipe_cxsr) sep \
-       func(has_hotplug) sep \
-       func(cursor_needs_physical) sep \
-       func(has_overlay) sep \
-       func(overlay_needs_physical) sep \
-       func(supports_tv) sep \
-       func(has_llc) sep \
-       func(has_snoop) sep \
-       func(has_ddi) sep \
-       func(has_fpga_dbg) sep \
-       func(has_pooled_eu)
-
-#define DEFINE_FLAG(name) u8 name:1
-#define SEP_SEMICOLON ;
+#define DEV_INFO_FOR_EACH_FLAG(func) \
+       /* Keep is_* in chronological order */ \
+       func(is_mobile); \
+       func(is_i85x); \
+       func(is_i915g); \
+       func(is_i945gm); \
+       func(is_g33); \
+       func(is_g4x); \
+       func(is_pineview); \
+       func(is_broadwater); \
+       func(is_crestline); \
+       func(is_ivybridge); \
+       func(is_valleyview); \
+       func(is_cherryview); \
+       func(is_haswell); \
+       func(is_broadwell); \
+       func(is_skylake); \
+       func(is_broxton); \
+       func(is_kabylake); \
+       func(is_preliminary); \
+       /* Keep has_* in alphabetical order */ \
+       func(has_csr); \
+       func(has_ddi); \
+       func(has_dp_mst); \
+       func(has_fbc); \
+       func(has_fpga_dbg); \
+       func(has_gmbus_irq); \
+       func(has_gmch_display); \
+       func(has_guc); \
+       func(has_hotplug); \
+       func(has_hw_contexts); \
+       func(has_l3_dpf); \
+       func(has_llc); \
+       func(has_logical_ring_contexts); \
+       func(has_overlay); \
+       func(has_pipe_cxsr); \
+       func(has_pooled_eu); \
+       func(has_psr); \
+       func(has_rc6); \
+       func(has_rc6p); \
+       func(has_resource_streamer); \
+       func(has_runtime_pm); \
+       func(has_snoop); \
+       func(cursor_needs_physical); \
+       func(hws_needs_physical); \
+       func(overlay_needs_physical); \
+       func(supports_tv)
 
 struct sseu_dev_info {
        u8 slice_mask;
@@ -709,7 +722,9 @@ struct intel_device_info {
        u16 gen_mask;
        u8 ring_mask; /* Rings supported by the HW */
        u8 num_rings;
-       DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+#define DEFINE_FLAG(name) u8 name:1
+       DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
+#undef DEFINE_FLAG
        u16 ddb_size; /* in blocks */
        /* Register offsets for the various display pipes and transcoders */
        int pipe_offsets[I915_MAX_TRANSCODERS];
@@ -726,14 +741,15 @@ struct intel_device_info {
        } color;
 };
 
-#undef DEFINE_FLAG
-#undef SEP_SEMICOLON
-
 struct intel_display_error_state;
 
 struct drm_i915_error_state {
        struct kref ref;
        struct timeval time;
+       struct timeval boottime;
+       struct timeval uptime;
+
+       struct drm_i915_private *i915;
 
        char error_msg[128];
        bool simulated;
@@ -759,11 +775,12 @@ struct drm_i915_error_state {
        u32 gam_ecochk;
        u32 gab_ctl;
        u32 gfx_mode;
-       u32 extra_instdone[I915_NUM_INSTDONE_REG];
+
        u64 fence[I915_MAX_NUM_FENCES];
        struct intel_overlay_error_state *overlay;
        struct intel_display_error_state *display;
        struct drm_i915_error_object *semaphore;
+       struct drm_i915_error_object *guc_log;
 
        struct drm_i915_error_engine {
                int engine_id;
@@ -775,6 +792,9 @@ struct drm_i915_error_state {
                struct i915_address_space *vm;
                int num_requests;
 
+               /* position of active request inside the ring */
+               u32 rq_head, rq_post, rq_tail;
+
                /* our own tracking of ring head and tail */
                u32 cpu_ring_head;
                u32 cpu_ring_tail;
@@ -791,7 +811,6 @@ struct drm_i915_error_state {
                u32 hws;
                u32 ipeir;
                u32 ipehr;
-               u32 instdone;
                u32 bbstate;
                u32 instpm;
                u32 instps;
@@ -802,11 +821,13 @@ struct drm_i915_error_state {
                u64 faddr;
                u32 rc_psmi; /* sleep state */
                u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
+               struct intel_instdone instdone;
 
                struct drm_i915_error_object {
-                       int page_count;
                        u64 gtt_offset;
                        u64 gtt_size;
+                       int page_count;
+                       int unused;
                        u32 *pages[0];
                } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
 
@@ -815,10 +836,11 @@ struct drm_i915_error_state {
                struct drm_i915_error_request {
                        long jiffies;
                        pid_t pid;
+                       u32 context;
                        u32 seqno;
                        u32 head;
                        u32 tail;
-               } *requests;
+               } *requests, execlist[2];
 
                struct drm_i915_error_waiter {
                        char comm[TASK_COMM_LEN];
@@ -972,6 +994,9 @@ struct intel_fbc {
        bool enabled;
        bool active;
 
+       bool underrun_detected;
+       struct work_struct underrun_work;
+
        struct intel_fbc_state_cache {
                struct {
                        unsigned int mode_flags;
@@ -1297,6 +1322,12 @@ struct i915_power_well {
        /* cached hw enabled state */
        bool hw_enabled;
        unsigned long domains;
+       /* unique identifier for this power well */
+       unsigned long id;
+       /*
+        * Arbitrary data associated with this power well. Platform and power
+        * well specific.
+        */
        unsigned long data;
        const struct i915_power_well_ops *ops;
 };
@@ -1339,6 +1370,11 @@ struct i915_gem_mm {
         */
        struct list_head unbound_list;
 
+       /** List of all objects in gtt_space, currently mmaped by userspace.
+        * All objects within this list must also be on bound_list.
+        */
+       struct list_head userfault_list;
+
        /** Usable portion of the GTT for GEM */
        unsigned long stolen_base; /* limited to low memory (32-bit) */
 
@@ -1368,7 +1404,7 @@ struct i915_gem_mm {
 
        /* accounting, useful for userland debugging */
        spinlock_t object_stat_lock;
-       size_t object_memory;
+       u64 object_memory;
        u32 object_count;
 };
 
@@ -1387,6 +1423,9 @@ struct i915_error_state_file_priv {
        struct drm_i915_error_state *error;
 };
 
+#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
+#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
+
 struct i915_gpu_error {
        /* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -1620,7 +1659,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
 }
 
 struct skl_ddb_allocation {
-       struct skl_ddb_entry pipe[I915_MAX_PIPES];
        struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
        struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
 };
@@ -1628,15 +1666,12 @@ struct skl_ddb_allocation {
 struct skl_wm_values {
        unsigned dirty_pipes;
        struct skl_ddb_allocation ddb;
-       uint32_t wm_linetime[I915_MAX_PIPES];
-       uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
-       uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
 };
 
 struct skl_wm_level {
-       bool plane_en[I915_MAX_PLANES];
-       uint16_t plane_res_b[I915_MAX_PLANES];
-       uint8_t plane_res_l[I915_MAX_PLANES];
+       bool plane_en;
+       uint16_t plane_res_b;
+       uint8_t plane_res_l;
 };
 
 /*
@@ -1664,7 +1699,6 @@ struct skl_wm_level {
  */
 struct i915_runtime_pm {
        atomic_t wakeref_count;
-       atomic_t atomic_seq;
        bool suspended;
        bool irqs_enabled;
 };
@@ -1759,7 +1793,7 @@ struct drm_i915_private {
 
        struct i915_virtual_gpu vgpu;
 
-       struct intel_gvt gvt;
+       struct intel_gvt *gvt;
 
        struct intel_guc guc;
 
@@ -1787,7 +1821,7 @@ struct drm_i915_private {
 
        struct pci_dev *bridge_dev;
        struct i915_gem_context *kernel_context;
-       struct intel_engine_cs engine[I915_NUM_ENGINES];
+       struct intel_engine_cs *engine[I915_NUM_ENGINES];
        struct i915_vma *semaphore;
        u32 next_seqno;
 
@@ -1814,8 +1848,10 @@ struct drm_i915_private {
                u32 de_irq_mask[I915_MAX_PIPES];
        };
        u32 gt_irq_mask;
-       u32 pm_irq_mask;
+       u32 pm_imr;
+       u32 pm_ier;
        u32 pm_rps_events;
+       u32 pm_guc_events;
        u32 pipestat_irq_mask[I915_MAX_PIPES];
 
        struct i915_hotplug hotplug;
@@ -2074,12 +2110,15 @@ struct drm_i915_private {
                 * off the idle_work.
                 */
                struct delayed_work idle_work;
+
+               ktime_t last_init_time;
        } gt;
 
        /* perform PHY state sanity checks? */
        bool chv_phy_assert[2];
 
-       struct intel_encoder *dig_port_map[I915_MAX_PORTS];
+       /* Used to save the pipe-to-encoder mapping for audio */
+       struct intel_encoder *av_enc_map[I915_MAX_PIPES];
 
        /*
         * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
@@ -2103,19 +2142,11 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
 }
 
 /* Simple iterator over all initialised engines */
-#define for_each_engine(engine__, dev_priv__) \
-       for ((engine__) = &(dev_priv__)->engine[0]; \
-            (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
-            (engine__)++) \
-               for_each_if (intel_engine_initialized(engine__))
-
-/* Iterator with engine_id */
-#define for_each_engine_id(engine__, dev_priv__, id__) \
-       for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \
-            (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
-            (engine__)++) \
-               for_each_if (((id__) = (engine__)->id, \
-                             intel_engine_initialized(engine__)))
+#define for_each_engine(engine__, dev_priv__, id__) \
+       for ((id__) = 0; \
+            (id__) < I915_NUM_ENGINES; \
+            (id__)++) \
+               for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
 
 #define __mask_next_bit(mask) ({                                       \
        int __idx = ffs(mask) - 1;                                      \
@@ -2126,7 +2157,7 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
 /* Iterator over subset of engines selected by mask */
 #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
        for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask;        \
-            tmp__ ? (engine__ = &(dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
+            tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
 
 enum hdmi_force_audio {
        HDMI_AUDIO_OFF_DVI = -2,        /* no aux data for HDMI-DVI converter */
@@ -2194,6 +2225,11 @@ struct drm_i915_gem_object {
        struct drm_mm_node *stolen;
        struct list_head global_list;
 
+       /**
+        * Whether the object is currently in the GGTT mmap.
+        */
+       struct list_head userfault_link;
+
        /** Used in execbuf to temporarily hold a ref */
        struct list_head obj_exec_link;
 
@@ -2221,13 +2257,6 @@ struct drm_i915_gem_object {
         */
        unsigned int madv:2;
 
-       /**
-        * Whether the current gtt mapping needs to be mappable (and isn't just
-        * mappable by accident). Track pin and fault separate for a more
-        * accurate mappable working set.
-        */
-       unsigned int fault_mappable:1;
-
        /*
         * Is the object to be mapped as read-only to the GPU
         * Only honoured if hardware has relevant pte bit
@@ -2586,8 +2615,9 @@ struct drm_i915_cmd_table {
        __p; \
 })
 #define INTEL_INFO(p)  (&__I915__(p)->info)
-#define INTEL_GEN(p)   (INTEL_INFO(p)->gen)
-#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
+
+#define INTEL_GEN(dev_priv)    ((dev_priv)->info.gen)
+#define INTEL_DEVID(dev_priv)  ((dev_priv)->info.device_id)
 
 #define REVID_FOREVER          0xff
 #define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision)
@@ -2598,7 +2628,7 @@ struct drm_i915_cmd_table {
  *
  * Use GEN_FOREVER for unbound start and or end.
  */
-#define IS_GEN(p, s, e) ({ \
+#define IS_GEN(dev_priv, s, e) ({ \
        unsigned int __s = (s), __e = (e); \
        BUILD_BUG_ON(!__builtin_constant_p(s)); \
        BUILD_BUG_ON(!__builtin_constant_p(e)); \
@@ -2608,7 +2638,7 @@ struct drm_i915_cmd_table {
                __e = BITS_PER_LONG - 1; \
        else \
                __e = (e) - 1; \
-       !!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
+       !!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \
 })
 
 /*
@@ -2619,73 +2649,73 @@ struct drm_i915_cmd_table {
 #define IS_REVID(p, since, until) \
        (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
 
-#define IS_I830(dev)           (INTEL_DEVID(dev) == 0x3577)
-#define IS_845G(dev)           (INTEL_DEVID(dev) == 0x2562)
+#define IS_I830(dev_priv)      (INTEL_DEVID(dev_priv) == 0x3577)
+#define IS_845G(dev_priv)      (INTEL_DEVID(dev_priv) == 0x2562)
 #define IS_I85X(dev)           (INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev)          (INTEL_DEVID(dev) == 0x2572)
+#define IS_I865G(dev_priv)     (INTEL_DEVID(dev_priv) == 0x2572)
 #define IS_I915G(dev)          (INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev)         (INTEL_DEVID(dev) == 0x2592)
-#define IS_I945G(dev)          (INTEL_DEVID(dev) == 0x2772)
+#define IS_I915GM(dev_priv)    (INTEL_DEVID(dev_priv) == 0x2592)
+#define IS_I945G(dev_priv)     (INTEL_DEVID(dev_priv) == 0x2772)
 #define IS_I945GM(dev)         (INTEL_INFO(dev)->is_i945gm)
 #define IS_BROADWATER(dev)     (INTEL_INFO(dev)->is_broadwater)
 #define IS_CRESTLINE(dev)      (INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev)           (INTEL_DEVID(dev) == 0x2A42)
-#define IS_G4X(dev)            (INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev)     (INTEL_DEVID(dev) == 0xa001)
-#define IS_PINEVIEW_M(dev)     (INTEL_DEVID(dev) == 0xa011)
+#define IS_GM45(dev_priv)      (INTEL_DEVID(dev_priv) == 0x2A42)
+#define IS_G4X(dev_priv)       ((dev_priv)->info.is_g4x)
+#define IS_PINEVIEW_G(dev_priv)        (INTEL_DEVID(dev_priv) == 0xa001)
+#define IS_PINEVIEW_M(dev_priv)        (INTEL_DEVID(dev_priv) == 0xa011)
 #define IS_PINEVIEW(dev)       (INTEL_INFO(dev)->is_pineview)
 #define IS_G33(dev)            (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_M(dev)     (INTEL_DEVID(dev) == 0x0046)
-#define IS_IVYBRIDGE(dev)      (INTEL_INFO(dev)->is_ivybridge)
-#define IS_IVB_GT1(dev)                (INTEL_DEVID(dev) == 0x0156 || \
-                                INTEL_DEVID(dev) == 0x0152 || \
-                                INTEL_DEVID(dev) == 0x015a)
-#define IS_VALLEYVIEW(dev)     (INTEL_INFO(dev)->is_valleyview)
-#define IS_CHERRYVIEW(dev)     (INTEL_INFO(dev)->is_cherryview)
-#define IS_HASWELL(dev)        (INTEL_INFO(dev)->is_haswell)
-#define IS_BROADWELL(dev)      (INTEL_INFO(dev)->is_broadwell)
-#define IS_SKYLAKE(dev)        (INTEL_INFO(dev)->is_skylake)
-#define IS_BROXTON(dev)                (INTEL_INFO(dev)->is_broxton)
-#define IS_KABYLAKE(dev)       (INTEL_INFO(dev)->is_kabylake)
+#define IS_IRONLAKE_M(dev_priv)        (INTEL_DEVID(dev_priv) == 0x0046)
+#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.is_ivybridge)
+#define IS_IVB_GT1(dev_priv)   (INTEL_DEVID(dev_priv) == 0x0156 || \
+                                INTEL_DEVID(dev_priv) == 0x0152 || \
+                                INTEL_DEVID(dev_priv) == 0x015a)
+#define IS_VALLEYVIEW(dev_priv)        ((dev_priv)->info.is_valleyview)
+#define IS_CHERRYVIEW(dev_priv)        ((dev_priv)->info.is_cherryview)
+#define IS_HASWELL(dev_priv)   ((dev_priv)->info.is_haswell)
+#define IS_BROADWELL(dev_priv) ((dev_priv)->info.is_broadwell)
+#define IS_SKYLAKE(dev_priv)   ((dev_priv)->info.is_skylake)
+#define IS_BROXTON(dev_priv)   ((dev_priv)->info.is_broxton)
+#define IS_KABYLAKE(dev_priv)  ((dev_priv)->info.is_kabylake)
 #define IS_MOBILE(dev)         (INTEL_INFO(dev)->is_mobile)
-#define IS_HSW_EARLY_SDV(dev)  (IS_HASWELL(dev) && \
-                                (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
-#define IS_BDW_ULT(dev)                (IS_BROADWELL(dev) && \
-                                ((INTEL_DEVID(dev) & 0xf) == 0x6 ||    \
-                                (INTEL_DEVID(dev) & 0xf) == 0xb ||     \
-                                (INTEL_DEVID(dev) & 0xf) == 0xe))
+#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
+                                   (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
+#define IS_BDW_ULT(dev_priv)   (IS_BROADWELL(dev_priv) && \
+                                ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 ||       \
+                                (INTEL_DEVID(dev_priv) & 0xf) == 0xb ||        \
+                                (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
 /* ULX machines are also considered ULT. */
-#define IS_BDW_ULX(dev)                (IS_BROADWELL(dev) && \
-                                (INTEL_DEVID(dev) & 0xf) == 0xe)
-#define IS_BDW_GT3(dev)                (IS_BROADWELL(dev) && \
-                                (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
-#define IS_HSW_ULT(dev)                (IS_HASWELL(dev) && \
-                                (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
-#define IS_HSW_GT3(dev)                (IS_HASWELL(dev) && \
-                                (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
+#define IS_BDW_ULX(dev_priv)   (IS_BROADWELL(dev_priv) && \
+                                (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
+#define IS_BDW_GT3(dev_priv)   (IS_BROADWELL(dev_priv) && \
+                                (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+#define IS_HSW_ULT(dev_priv)   (IS_HASWELL(dev_priv) && \
+                                (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
+#define IS_HSW_GT3(dev_priv)   (IS_HASWELL(dev_priv) && \
+                                (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
 /* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(dev)                (INTEL_DEVID(dev) == 0x0A0E || \
-                                INTEL_DEVID(dev) == 0x0A1E)
-#define IS_SKL_ULT(dev)                (INTEL_DEVID(dev) == 0x1906 || \
-                                INTEL_DEVID(dev) == 0x1913 || \
-                                INTEL_DEVID(dev) == 0x1916 || \
-                                INTEL_DEVID(dev) == 0x1921 || \
-                                INTEL_DEVID(dev) == 0x1926)
-#define IS_SKL_ULX(dev)                (INTEL_DEVID(dev) == 0x190E || \
-                                INTEL_DEVID(dev) == 0x1915 || \
-                                INTEL_DEVID(dev) == 0x191E)
-#define IS_KBL_ULT(dev)                (INTEL_DEVID(dev) == 0x5906 || \
-                                INTEL_DEVID(dev) == 0x5913 || \
-                                INTEL_DEVID(dev) == 0x5916 || \
-                                INTEL_DEVID(dev) == 0x5921 || \
-                                INTEL_DEVID(dev) == 0x5926)
-#define IS_KBL_ULX(dev)                (INTEL_DEVID(dev) == 0x590E || \
-                                INTEL_DEVID(dev) == 0x5915 || \
-                                INTEL_DEVID(dev) == 0x591E)
-#define IS_SKL_GT3(dev)                (IS_SKYLAKE(dev) && \
-                                (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
-#define IS_SKL_GT4(dev)                (IS_SKYLAKE(dev) && \
-                                (INTEL_DEVID(dev) & 0x00F0) == 0x0030)
+#define IS_HSW_ULX(dev_priv)   (INTEL_DEVID(dev_priv) == 0x0A0E || \
+                                INTEL_DEVID(dev_priv) == 0x0A1E)
+#define IS_SKL_ULT(dev_priv)   (INTEL_DEVID(dev_priv) == 0x1906 || \
+                                INTEL_DEVID(dev_priv) == 0x1913 || \
+                                INTEL_DEVID(dev_priv) == 0x1916 || \
+                                INTEL_DEVID(dev_priv) == 0x1921 || \
+                                INTEL_DEVID(dev_priv) == 0x1926)
+#define IS_SKL_ULX(dev_priv)   (INTEL_DEVID(dev_priv) == 0x190E || \
+                                INTEL_DEVID(dev_priv) == 0x1915 || \
+                                INTEL_DEVID(dev_priv) == 0x191E)
+#define IS_KBL_ULT(dev_priv)   (INTEL_DEVID(dev_priv) == 0x5906 || \
+                                INTEL_DEVID(dev_priv) == 0x5913 || \
+                                INTEL_DEVID(dev_priv) == 0x5916 || \
+                                INTEL_DEVID(dev_priv) == 0x5921 || \
+                                INTEL_DEVID(dev_priv) == 0x5926)
+#define IS_KBL_ULX(dev_priv)   (INTEL_DEVID(dev_priv) == 0x590E || \
+                                INTEL_DEVID(dev_priv) == 0x5915 || \
+                                INTEL_DEVID(dev_priv) == 0x591E)
+#define IS_SKL_GT3(dev_priv)   (IS_SKYLAKE(dev_priv) && \
+                                (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+#define IS_SKL_GT4(dev_priv)   (IS_SKYLAKE(dev_priv) && \
+                                (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
 
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
@@ -2705,7 +2735,8 @@ struct drm_i915_cmd_table {
 #define BXT_REVID_B0           0x3
 #define BXT_REVID_C0           0x9
 
-#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
+#define IS_BXT_REVID(dev_priv, since, until) \
+       (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
 
 #define KBL_REVID_A0           0x0
 #define KBL_REVID_B0           0x1
@@ -2713,8 +2744,8 @@ struct drm_i915_cmd_table {
 #define KBL_REVID_D0           0x3
 #define KBL_REVID_E0           0x4
 
-#define IS_KBL_REVID(p, since, until) \
-       (IS_KABYLAKE(p) && IS_REVID(p, since, until))
+#define IS_KBL_REVID(dev_priv, since, until) \
+       (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
 
 /*
  * The genX designation typically refers to the render engine, so render
@@ -2722,14 +2753,14 @@ struct drm_i915_cmd_table {
  * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
  * chips, etc.).
  */
-#define IS_GEN2(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(1)))
-#define IS_GEN3(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(2)))
-#define IS_GEN4(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(3)))
-#define IS_GEN5(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(4)))
-#define IS_GEN6(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(5)))
-#define IS_GEN7(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(6)))
-#define IS_GEN8(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(7)))
-#define IS_GEN9(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(8)))
+#define IS_GEN2(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(1)))
+#define IS_GEN3(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(2)))
+#define IS_GEN4(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(3)))
+#define IS_GEN5(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(4)))
+#define IS_GEN6(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(5)))
+#define IS_GEN7(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(6)))
+#define IS_GEN8(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(7)))
+#define IS_GEN9(dev_priv)      (!!((dev_priv)->info.gen_mask & BIT(8)))
 
 #define ENGINE_MASK(id)        BIT(id)
 #define RENDER_RING    ENGINE_MASK(RCS)
@@ -2750,8 +2781,8 @@ struct drm_i915_cmd_table {
 #define HAS_LLC(dev)           (INTEL_INFO(dev)->has_llc)
 #define HAS_SNOOP(dev)         (INTEL_INFO(dev)->has_snoop)
 #define HAS_EDRAM(dev)         (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
-#define HAS_WT(dev)            ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
-                                HAS_EDRAM(dev))
+#define HAS_WT(dev_priv)       ((IS_HASWELL(dev_priv) || \
+                                IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
 #define HWS_NEEDS_PHYSICAL(dev)        (INTEL_INFO(dev)->hws_needs_physical)
 
 #define HAS_HW_CONTEXTS(dev)   (INTEL_INFO(dev)->has_hw_contexts)
@@ -2764,7 +2795,7 @@ struct drm_i915_cmd_table {
 #define OVERLAY_NEEDS_PHYSICAL(dev)    (INTEL_INFO(dev)->overlay_needs_physical)
 
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
-#define HAS_BROKEN_CS_TLB(dev)         (IS_I830(dev) || IS_845G(dev))
+#define HAS_BROKEN_CS_TLB(dev_priv)    (IS_I830(dev_priv) || IS_845G(dev_priv))
 
 /* WaRsDisableCoarsePowerGating:skl,bxt */
 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
@@ -2784,8 +2815,9 @@ struct drm_i915_cmd_table {
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
-#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
-                                                     IS_I915GM(dev)))
+#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
+                                        !(IS_I915G(dev_priv) || \
+                                        IS_I915GM(dev_priv)))
 #define SUPPORTS_TV(dev)               (INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev)           (INTEL_INFO(dev)->has_hotplug)
 
@@ -2793,19 +2825,19 @@ struct drm_i915_cmd_table {
 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
 #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 
-#define HAS_IPS(dev)           (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
+#define HAS_IPS(dev_priv)      (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
 
 #define HAS_DP_MST(dev)        (INTEL_INFO(dev)->has_dp_mst)
 
-#define HAS_DDI(dev)           (INTEL_INFO(dev)->has_ddi)
+#define HAS_DDI(dev_priv)      ((dev_priv)->info.has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev)    (INTEL_INFO(dev)->has_fpga_dbg)
 #define HAS_PSR(dev)           (INTEL_INFO(dev)->has_psr)
-#define HAS_RUNTIME_PM(dev)    (INTEL_INFO(dev)->has_runtime_pm)
 #define HAS_RC6(dev)           (INTEL_INFO(dev)->has_rc6)
 #define HAS_RC6p(dev)          (INTEL_INFO(dev)->has_rc6p)
 
 #define HAS_CSR(dev)   (INTEL_INFO(dev)->has_csr)
 
+#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
 /*
  * For now, anything with a GuC requires uCode loading, and then supports
  * command submission once loaded. But these are logically independent
@@ -2832,22 +2864,27 @@ struct drm_i915_cmd_table {
 #define INTEL_PCH_P3X_DEVICE_ID_TYPE           0x7000
 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE          0x2900 /* qemu q35 has 2918 */
 
-#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
-#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
-#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
-#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
-#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
-#define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
-#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
-#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
-#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
-#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
+#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
+#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
+#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
+#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
+#define HAS_PCH_LPT_LP(dev_priv) \
+       ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+#define HAS_PCH_LPT_H(dev_priv) \
+       ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
+#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
+#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
+#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
+#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
 
-#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->has_gmch_display)
+#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
+
+#define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv))
 
 /* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev) (INTEL_INFO(dev)->has_l3_dpf)
-#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
+#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
+#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
+                                2 : HAS_L3_DPF(dev_priv))
 
 #define GT_FREQUENCY_MULTIPLIER 50
 #define GEN9_FREQ_SCALER 3
@@ -2883,6 +2920,11 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg);
 #endif
+extern const struct dev_pm_ops i915_pm_ops;
+
+extern int i915_driver_load(struct pci_dev *pdev,
+                           const struct pci_device_id *ent);
+extern void i915_driver_unload(struct drm_device *dev);
 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
 extern void i915_reset(struct drm_i915_private *dev_priv);
@@ -2969,7 +3011,7 @@ int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
 
 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
 {
-       return dev_priv->gvt.initialized;
+       return dev_priv->gvt;
 }
 
 static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
@@ -3082,7 +3124,7 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
                         const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
-                                                 size_t size);
+                                                  u64 size);
 struct drm_i915_gem_object *i915_gem_object_create_from_data(
                struct drm_device *dev, const void *data, size_t size);
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
@@ -3104,9 +3146,10 @@ void i915_vma_destroy(struct i915_vma *vma);
 
 int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 
+void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
+
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
 static inline int __sg_page_count(struct scatterlist *sg)
@@ -3156,14 +3199,15 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 
 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-       BUG_ON(obj->pages == NULL);
+       GEM_BUG_ON(obj->pages == NULL);
        obj->pages_pin_count++;
 }
 
 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
-       BUG_ON(obj->pages_pin_count == 0);
+       GEM_BUG_ON(obj->pages_pin_count == 0);
        obj->pages_pin_count--;
+       GEM_BUG_ON(obj->pages_pin_count < obj->bind_count);
 }
 
 enum i915_map_type {
@@ -3275,9 +3319,10 @@ int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void i915_gem_resume(struct drm_device *dev);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int __must_check
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-                              bool readonly);
+int i915_gem_object_wait(struct drm_i915_gem_object *obj,
+                        unsigned int flags,
+                        long timeout,
+                        struct intel_rps_client *rps);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
                                  bool write);
@@ -3521,6 +3566,8 @@ static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
 #endif
 
 /* i915_gpu_error.c */
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
 __printf(2, 3)
 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
 int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
@@ -3541,7 +3588,20 @@ void i915_error_state_get(struct drm_device *dev,
 void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
 void i915_destroy_error_state(struct drm_device *dev);
 
-void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
+#else
+
+static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
+                                           u32 engine_mask,
+                                           const char *error_msg)
+{
+}
+
+static inline void i915_destroy_error_state(struct drm_device *dev)
+{
+}
+
+#endif
+
 const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 
 /* i915_cmd_parser.c */
@@ -3591,6 +3651,9 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por
 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
 bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
                                     enum port port);
+bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
+                               enum port port);
+
 
 /* intel_opregion.c */
 #ifdef CONFIG_ACPI
@@ -3702,6 +3765,23 @@ u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 
 /* intel_dpio_phy.c */
+void bxt_port_to_phy_channel(enum port port,
+                            enum dpio_phy *phy, enum dpio_channel *ch);
+void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
+                                 enum port port, u32 margin, u32 scale,
+                                 u32 enable, u32 deemphasis);
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+                           enum dpio_phy phy);
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+                             enum dpio_phy phy);
+uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
+                                            uint8_t lane_count);
+void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
+                                    uint8_t lane_lat_optim_mask);
+uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
+
 void chv_set_phy_signal_level(struct intel_encoder *encoder,
                              u32 deemph_reg_value, u32 margin_reg_value,
                              bool uniq_trans_scale);
@@ -3791,11 +3871,30 @@ __raw_write(64, q)
 #undef __raw_write
 
 /* These are untraced mmio-accessors that are only valid to be used inside
- * critical sections inside IRQ handlers where forcewake is explicitly
+ * critical sections, such as inside IRQ handlers, where forcewake is explicitly
  * controlled.
+ *
  * Think twice, and think again, before using these.
- * Note: Should only be used between intel_uncore_forcewake_irqlock() and
- * intel_uncore_forcewake_irqunlock().
+ *
+ * As an example, these accessors can possibly be used between:
+ *
+ * spin_lock_irq(&dev_priv->uncore.lock);
+ * intel_uncore_forcewake_get__locked();
+ *
+ * and
+ *
+ * intel_uncore_forcewake_put__locked();
+ * spin_unlock_irq(&dev_priv->uncore.lock);
+ *
+ *
+ * Note: some registers may not need forcewake held, so
+ * intel_uncore_forcewake_{get,put} can be omitted, see
+ * intel_uncore_forcewake_for_reg().
+ *
+ * Certain architectures will die if the same cacheline is concurrently accessed
+ * by different clients (e.g. on Ivybridge). Access to registers should
+ * therefore generally be serialised, by either the dev_priv->uncore.lock or
+ * a more localised lock guarding all access to that bank of registers.
  */
 #define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
 #define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
@@ -3807,11 +3906,11 @@ __raw_write(64, q)
 #define INTEL_BROADCAST_RGB_FULL 1
 #define INTEL_BROADCAST_RGB_LIMITED 2
 
-static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
+static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
 {
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                return VLV_VGACNTRL;
-       else if (INTEL_INFO(dev)->gen >= 5)
+       else if (INTEL_GEN(dev_priv) >= 5)
                return CPU_VGACNTRL;
        else
                return VGACNTRL;