git.karo-electronics.de Git - linux-beck.git/commitdiff
Merge branch 'virtio-gpu-for-drm-next' of git://git.kraxel.org/linux into drm-next
author Dave Airlie <airlied@redhat.com>
Sun, 18 Oct 2015 21:16:15 +0000 (07:16 +1000)
committer Dave Airlie <airlied@redhat.com>
Sun, 18 Oct 2015 21:16:15 +0000 (07:16 +1000)
Add 3D support to the virtio-gpu.

* 'virtio-gpu-for-drm-next' of git://git.kraxel.org/linux:
  virtio-gpu: add page flip support
  virtio-gpu: mark as a render gpu
  virtio-gpu: add basic prime support
  virtio-gpu: add 3d/virgl support
  virtio-gpu: don't free things on ttm_bo_init failure
  virtio-gpu: wait for cursor updates finish
  virtio-gpu: add & use virtio_gpu_queue_fenced_ctrl_buffer
  virtio-gpu: add virtio_gpu_queue_ctrl_buffer_locked

240 files changed:
Documentation/DocBook/drm.tmpl
Documentation/kernel-parameters.txt
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/atom.c
drivers/gpu/drm/amd/amdgpu/atom.h
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/cz_dpm.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/kv_dpm.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
drivers/gpu/drm/armada/Kconfig
drivers/gpu/drm/armada/Makefile
drivers/gpu/drm/armada/armada_crtc.c
drivers/gpu/drm/armada/armada_crtc.h
drivers/gpu/drm/armada/armada_drm.h
drivers/gpu/drm/armada/armada_drv.c
drivers/gpu/drm/armada/armada_output.c [deleted file]
drivers/gpu/drm/armada/armada_output.h [deleted file]
drivers/gpu/drm/armada/armada_overlay.c
drivers/gpu/drm/armada/armada_slave.c [deleted file]
drivers/gpu/drm/armada/armada_slave.h [deleted file]
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
drivers/gpu/drm/bridge/Kconfig
drivers/gpu/drm/bridge/Makefile
drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c [new file with mode: 0644]
drivers/gpu/drm/bridge/dw_hdmi-audio.h [new file with mode: 0644]
drivers/gpu/drm/bridge/dw_hdmi.c
drivers/gpu/drm/bridge/dw_hdmi.h
drivers/gpu/drm/drm_agpsupport.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_bufs.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_edid_load.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_internal.h
drivers/gpu/drm/drm_ioc32.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/drm_memory.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_modeset_lock.c
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_plane_helper.c
drivers/gpu/drm/drm_platform.c
drivers/gpu/drm/drm_rect.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_crtc.h
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
drivers/gpu/drm/gma500/cdv_intel_dp.c
drivers/gpu/drm/gma500/psb_drv.h
drivers/gpu/drm/gma500/psb_irq.c
drivers/gpu/drm/gma500/psb_irq.h
drivers/gpu/drm/i2c/ch7006_drv.c
drivers/gpu/drm/i2c/ch7006_mode.c
drivers/gpu/drm/i2c/ch7006_priv.h
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_fence.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_guc_reg.h
drivers/gpu/drm/i915/i915_guc_submission.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_params.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/i915_vgpu.h
drivers/gpu/drm/i915/intel_acpi.c
drivers/gpu/drm/i915/intel_atomic.c
drivers/gpu/drm/i915/intel_atomic_plane.c
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_bios.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dsi.h
drivers/gpu/drm/i915/intel_dsi_pll.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_guc.h [new file with mode: 0644]
drivers/gpu/drm/i915/intel_guc_fwif.h
drivers/gpu/drm/i915/intel_guc_loader.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/mga/mga_dma.c
drivers/gpu/drm/mga/mga_drv.h
drivers/gpu/drm/mga/mga_irq.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/msm_atomic.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_display.h
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/omapdrm/omap_drv.h
drivers/gpu/drm/omapdrm/omap_fb.c
drivers/gpu/drm/omapdrm/omap_irq.c
drivers/gpu/drm/omapdrm/omap_plane.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_drv.c
drivers/gpu/drm/qxl/qxl_fb.c
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/r128/r128_cce.c
drivers/gpu/drm/r128/r128_drv.h
drivers/gpu/drm/r128/r128_irq.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/r600_cp.c
drivers/gpu/drm/radeon/radeon_acpi.c
drivers/gpu/drm/radeon/radeon_agp.c
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_cp.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_drv.h
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_irq.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/rcar-du/rcar_du_drv.c
drivers/gpu/drm/rcar-du/rcar_du_kms.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/shmobile/shmob_drm_drv.c
drivers/gpu/drm/sis/sis_drv.h
drivers/gpu/drm/sti/sti_crtc.c
drivers/gpu/drm/sti/sti_crtc.h
drivers/gpu/drm/sti/sti_drv.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/dpaux.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tilcdc/tilcdc_drv.c
drivers/gpu/drm/via/via_drv.h
drivers/gpu/drm/via/via_irq.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/ipu-v3/ipu-dc.c
drivers/gpu/ipu-v3/ipu-di.c
drivers/gpu/vga/vga_switcheroo.c
drivers/gpu/vga/vgaarb.c
include/drm/drmP.h
include/drm/drm_agpsupport.h
include/drm/drm_atomic_helper.h
include/drm/drm_crtc.h
include/drm/drm_dp_helper.h
include/drm/drm_edid.h
include/drm/drm_fb_helper.h
include/drm/drm_modeset_lock.h
include/drm/drm_plane_helper.h
include/linux/fb.h
include/linux/vga_switcheroo.h
include/uapi/drm/drm_mode.h
include/uapi/drm/sis_drm.h
include/uapi/drm/via_drm.h
sound/pci/hda/hda_intel.c

index 9ddf8c6cb88791e1f0f5a12629cd9ce5be30f2e7..a34fa4705ebfb77e48d76f80df6d1fcad8591470 100644 (file)
     <para>
       At the core of every DRM driver is a <structname>drm_driver</structname>
       structure. Drivers typically statically initialize a drm_driver structure,
-      and then pass it to one of the <function>drm_*_init()</function> functions
-      to register it with the DRM subsystem.
-    </para>
-    <para>
-      Newer drivers that no longer require a <structname>drm_bus</structname>
-      structure can alternatively use the low-level device initialization and
-      registration functions such as <function>drm_dev_alloc()</function> and
-      <function>drm_dev_register()</function> directly.
+      and then pass it to <function>drm_dev_alloc()</function> to allocate a
+      device instance. After the device instance is fully initialized it can be
+      registered (which makes it accessible from userspace) using
+      <function>drm_dev_register()</function>.
     </para>
     <para>
       The <structname>drm_driver</structname> structure contains static
@@ -296,83 +292,12 @@ char *date;</synopsis>
       </sect3>
     </sect2>
     <sect2>
-      <title>Device Registration</title>
-      <para>
-        A number of functions are provided to help with device registration.
-        The functions deal with PCI and platform devices, respectively.
-      </para>
-!Edrivers/gpu/drm/drm_pci.c
-!Edrivers/gpu/drm/drm_platform.c
-      <para>
-        New drivers that no longer rely on the services provided by the
-        <structname>drm_bus</structname> structure can call the low-level
-        device registration functions directly. The
-        <function>drm_dev_alloc()</function> function can be used to allocate
-        and initialize a new <structname>drm_device</structname> structure.
-        Drivers will typically want to perform some additional setup on this
-        structure, such as allocating driver-specific data and storing a
-        pointer to it in the DRM device's <structfield>dev_private</structfield>
-        field. Drivers should also set the device's unique name using the
-        <function>drm_dev_set_unique()</function> function. After it has been
-        set up a device can be registered with the DRM subsystem by calling
-        <function>drm_dev_register()</function>. This will cause the device to
-        be exposed to userspace and will call the driver's
-        <structfield>.load()</structfield> implementation. When a device is
-        removed, the DRM device can safely be unregistered and freed by calling
-        <function>drm_dev_unregister()</function> followed by a call to
-        <function>drm_dev_unref()</function>.
-      </para>
+      <title>Device Instance and Driver Handling</title>
+!Pdrivers/gpu/drm/drm_drv.c driver instance overview
 !Edrivers/gpu/drm/drm_drv.c
     </sect2>
     <sect2>
       <title>Driver Load</title>
-      <para>
-        The <methodname>load</methodname> method is the driver and device
-        initialization entry point. The method is responsible for allocating and
-       initializing driver private data, performing resource allocation and
-       mapping (e.g. acquiring
-        clocks, mapping registers or allocating command buffers), initializing
-        the memory manager (<xref linkend="drm-memory-management"/>), installing
-        the IRQ handler (<xref linkend="drm-irq-registration"/>), setting up
-        vertical blanking handling (<xref linkend="drm-vertical-blank"/>), mode
-       setting (<xref linkend="drm-mode-setting"/>) and initial output
-       configuration (<xref linkend="drm-kms-init"/>).
-      </para>
-      <note><para>
-        If compatibility is a concern (e.g. with drivers converted over from
-        User Mode Setting to Kernel Mode Setting), care must be taken to prevent
-        device initialization and control that is incompatible with currently
-        active userspace drivers. For instance, if user level mode setting
-        drivers are in use, it would be problematic to perform output discovery
-        &amp; configuration at load time. Likewise, if user-level drivers
-        unaware of memory management are in use, memory management and command
-        buffer setup may need to be omitted. These requirements are
-        driver-specific, and care needs to be taken to keep both old and new
-        applications and libraries working.
-      </para></note>
-      <synopsis>int (*load) (struct drm_device *, unsigned long flags);</synopsis>
-      <para>
-        The method takes two arguments, a pointer to the newly created
-       <structname>drm_device</structname> and flags. The flags are used to
-       pass the <structfield>driver_data</structfield> field of the device id
-       corresponding to the device passed to <function>drm_*_init()</function>.
-       Only PCI devices currently use this, USB and platform DRM drivers have
-       their <methodname>load</methodname> method called with flags to 0.
-      </para>
-      <sect3>
-        <title>Driver Private Data</title>
-        <para>
-          The driver private hangs off the main
-          <structname>drm_device</structname> structure and can be used for
-          tracking various device-specific bits of information, like register
-          offsets, command buffer status, register state for suspend/resume, etc.
-          At load time, a driver may simply allocate one and set
-          <structname>drm_device</structname>.<structfield>dev_priv</structfield>
-          appropriately; it should be freed and
-          <structname>drm_device</structname>.<structfield>dev_priv</structfield>
-          set to NULL when the driver is unloaded.
-        </para>
-      </sect3>
       <sect3 id="drm-irq-registration">
         <title>IRQ Registration</title>
         <para>
@@ -465,6 +390,18 @@ char *date;</synopsis>
         </para>
       </sect3>
     </sect2>
+    <sect2>
+      <title>Bus-specific Device Registration and PCI Support</title>
+      <para>
+        A number of functions are provided to help with device registration.
+       The functions deal with PCI and platform devices respectively and are
+       only provided for historical reasons. These are all deprecated and
+       shouldn't be used in new drivers. Besides that there's a few
+       helpers for pci drivers.
+      </para>
+!Edrivers/gpu/drm/drm_pci.c
+!Edrivers/gpu/drm/drm_platform.c
+    </sect2>
   </sect1>
 
   <!-- Internals: memory management -->
@@ -3646,7 +3583,7 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
        plane properties to default value, so that a subsequent open of the
        device will not inherit state from the previous user. It can also be
        used to execute delayed power switching state changes, e.g. in
-       conjunction with the vga-switcheroo infrastructure. Beyond that KMS
+       conjunction with the vga_switcheroo infrastructure. Beyond that KMS
        drivers should not do any further cleanup. Only legacy UMS drivers might
        need to clean up device state so that the vga console or an independent
        fbdev driver could take over.
@@ -3752,6 +3689,7 @@ int num_ioctls;</synopsis>
          </itemizedlist>
        </para>
       </para>
+!Edrivers/gpu/drm/drm_ioctl.c
     </sect2>
   </sect1>
   <sect1>
@@ -4237,6 +4175,20 @@ int num_ioctls;</synopsis>
 !Idrivers/gpu/drm/i915/i915_gem_shrinker.c
       </sect2>
     </sect1>
+    <sect1>
+      <title>GuC-based Command Submission</title>
+      <sect2>
+        <title>GuC</title>
+!Pdrivers/gpu/drm/i915/intel_guc_loader.c GuC-specific firmware loader
+!Idrivers/gpu/drm/i915/intel_guc_loader.c
+      </sect2>
+      <sect2>
+        <title>GuC Client</title>
+!Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submissison
+!Idrivers/gpu/drm/i915/i915_guc_submission.c
+      </sect2>
+    </sect1>
+
     <sect1>
       <title> Tracing </title>
       <para>
index 22a4b687ea5b4b3cb9d576bfeffaed813256a795..c6dd5f350684eb10c65fd97998c410d47b515430 100644 (file)
@@ -929,11 +929,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        The filter can be disabled or changed to another
                        driver later using sysfs.
 
-       drm_kms_helper.edid_firmware=[<connector>:]<file>
-                       Broken monitors, graphic adapters and KVMs may
-                       send no or incorrect EDID data sets. This parameter
-                       allows to specify an EDID data set in the
-                       /lib/firmware directory that is used instead.
+       drm_kms_helper.edid_firmware=[<connector>:]<file>[,[<connector>:]<file>]
+                       Broken monitors, graphic adapters, KVMs and EDIDless
+                       panels may send no or incorrect EDID data sets.
+                       This parameter allows to specify an EDID data sets
+                       in the /lib/firmware directory that are used instead.
                        Generic built-in EDID data sets are used, if one of
                        edid/1024x768.bin, edid/1280x1024.bin,
                        edid/1680x1050.bin, or edid/1920x1080.bin is given
@@ -942,7 +942,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        available in Documentation/EDID/HOWTO.txt. An EDID
                        data set will only be used for a particular connector,
                        if its name and a colon are prepended to the EDID
-                       name.
+                       name. Each connector may use a unique EDID data
+                       set by separating the files with a comma.  An EDID
+                       data set with no connector name will be used for
+                       any connectors not explicitly specified.
 
        dscc4.setup=    [NET]
 
index 45e7719846b15bb0ebbdfa1b746fc4553b96445a..e814517513ce9abf4c5d65a26367955b8d761b67 100644 (file)
@@ -6,7 +6,7 @@ drm-y       :=  drm_auth.o drm_bufs.o drm_cache.o \
                drm_context.o drm_dma.o \
                drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
                drm_lock.o drm_memory.o drm_drv.o drm_vm.o \
-               drm_agpsupport.o drm_scatter.o drm_pci.o \
+               drm_scatter.o drm_pci.o \
                drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
                drm_crtc.o drm_modes.o drm_edid.o \
                drm_info.o drm_debugfs.o drm_encoder_slave.o \
@@ -19,6 +19,9 @@ drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
 drm-$(CONFIG_PCI) += ati_pcigart.o
 drm-$(CONFIG_DRM_PANEL) += drm_panel.o
 drm-$(CONFIG_OF) += drm_of.o
+drm-$(CONFIG_AGP) += drm_agpsupport.o
+
+drm-y += $(drm-m)
 
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
                drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o
index 6647fb26ef25ce21dba9bffb87cb31e5abe73d80..3fa1397fd7ae4fa7879ac7039b26958d78db56c7 100644 (file)
@@ -79,6 +79,8 @@ extern int amdgpu_bapm;
 extern int amdgpu_deep_color;
 extern int amdgpu_vm_size;
 extern int amdgpu_vm_block_size;
+extern int amdgpu_vm_fault_stop;
+extern int amdgpu_vm_debug;
 extern int amdgpu_enable_scheduler;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
@@ -960,6 +962,11 @@ struct amdgpu_ring {
 #define AMDGPU_PTE_FRAG_64KB   (4 << 7)
 #define AMDGPU_LOG2_PAGES_PER_FRAG 4
 
+/* How to programm VM fault handling */
+#define AMDGPU_VM_FAULT_STOP_NEVER     0
+#define AMDGPU_VM_FAULT_STOP_FIRST     1
+#define AMDGPU_VM_FAULT_STOP_ALWAYS    2
+
 struct amdgpu_vm_pt {
        struct amdgpu_bo                *bo;
        uint64_t                        addr;
@@ -1708,7 +1715,7 @@ struct amdgpu_vce {
 /*
  * SDMA
  */
-struct amdgpu_sdma {
+struct amdgpu_sdma_instance {
        /* SDMA firmware */
        const struct firmware   *fw;
        uint32_t                fw_version;
@@ -1718,6 +1725,13 @@ struct amdgpu_sdma {
        bool                    burst_nop;
 };
 
+struct amdgpu_sdma {
+       struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+       struct amdgpu_irq_src   trap_irq;
+       struct amdgpu_irq_src   illegal_inst_irq;
+       int                     num_instances;
+};
+
 /*
  * Firmware
  */
@@ -2064,9 +2078,7 @@ struct amdgpu_device {
        struct amdgpu_gfx               gfx;
 
        /* sdma */
-       struct amdgpu_sdma              sdma[AMDGPU_MAX_SDMA_INSTANCES];
-       struct amdgpu_irq_src           sdma_trap_irq;
-       struct amdgpu_irq_src           sdma_illegal_inst_irq;
+       struct amdgpu_sdma              sdma;
 
        /* uvd */
        bool                            has_uvd;
@@ -2203,17 +2215,18 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
        ring->ring_free_dw--;
 }
 
-static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+static inline struct amdgpu_sdma_instance *
+amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
        int i;
 
-       for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++)
-               if (&adev->sdma[i].ring == ring)
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               if (&adev->sdma.instance[i].ring == ring)
                        break;
 
        if (i < AMDGPU_MAX_SDMA_INSTANCES)
-               return &adev->sdma[i];
+               return &adev->sdma.instance[i];
        else
                return NULL;
 }
@@ -2349,10 +2362,10 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev,
                                struct drm_file *file_priv);
 int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
 int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc);
-int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc);
-void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc);
-int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
+int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
                                    int *max_error,
                                    struct timeval *vblank_time,
                                    unsigned flags);
index aef4a7aac0f705325b9804777021a7c7626e342f..a142d5ae148d91eaa89b8a92c95836ba0d0b45f3 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <linux/power_supply.h>
-#include <linux/vga_switcheroo.h>
 #include <acpi/video.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
index dd2037bc0b4afa265435cdc3c27b4ddc2af0071b..0e1376317683e4de30803a987450dba3e01b9621 100644 (file)
@@ -649,12 +649,12 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
 
        case KGD_ENGINE_SDMA1:
                hdr = (const union amdgpu_firmware_header *)
-                                                       adev->sdma[0].fw->data;
+                                                       adev->sdma.instance[0].fw->data;
                break;
 
        case KGD_ENGINE_SDMA2:
                hdr = (const union amdgpu_firmware_header *)
-                                                       adev->sdma[1].fw->data;
+                                                       adev->sdma.instance[1].fw->data;
                break;
 
        default:
index dfd1d503bccfe6137f94c92b7a39478062a4878b..79fa5c7de856eab635ea9010ab0b8bfc446c37c2 100644 (file)
@@ -523,12 +523,12 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
 
        case KGD_ENGINE_SDMA1:
                hdr = (const union amdgpu_firmware_header *)
-                                                       adev->sdma[0].fw->data;
+                                                       adev->sdma.instance[0].fw->data;
                break;
 
        case KGD_ENGINE_SDMA2:
                hdr = (const union amdgpu_firmware_header *)
-                                                       adev->sdma[1].fw->data;
+                                                       adev->sdma.instance[1].fw->data;
                break;
 
        default:
index 3f7aaa45bf8e7ae0ee45faafa463e5425ffae463..1a6b239baab920d63e687954dbab2aa86c420422 100644 (file)
@@ -536,7 +536,7 @@ static bool amdgpu_atpx_detect(void)
 
        if (has_atpx && vga_count == 2) {
                acpi_get_name(amdgpu_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
-               printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
+               printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
                       acpi_method_name);
                amdgpu_atpx_priv.atpx_detected = true;
                return true;
index 02add0a508cba513bbe18b2a9ca99c1dd79352a8..c44c0c6afd1b15e3d6a3c32bfd62da7129976bfa 100644 (file)
@@ -29,7 +29,6 @@
 #include "amdgpu.h"
 #include "atom.h"
 
-#include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
 /*
index cb3c274edb0a6b23a9b830f1ca923cf07ccc45f2..baf00617fe906f8a505581cadec567503bf2ef15 100644 (file)
@@ -104,10 +104,11 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
                }
                break;
        case AMDGPU_HW_IP_DMA:
-               if (ring < 2) {
-                       *out_ring = &adev->sdma[ring].ring;
+               if (ring < adev->sdma.num_instances) {
+                       *out_ring = &adev->sdma.instance[ring].ring;
                } else {
-                       DRM_ERROR("only two SDMA rings are supported\n");
+                       DRM_ERROR("only %d SDMA rings are supported\n",
+                                 adev->sdma.num_instances);
                        return -EINVAL;
                }
                break;
@@ -177,7 +178,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
        /* get chunks */
        INIT_LIST_HEAD(&p->validated);
-       chunk_array_user = (uint64_t __user *)(cs->in.chunks);
+       chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
        if (copy_from_user(chunk_array, chunk_array_user,
                           sizeof(uint64_t)*cs->in.num_chunks)) {
                ret = -EFAULT;
@@ -197,7 +198,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                struct drm_amdgpu_cs_chunk user_chunk;
                uint32_t __user *cdata;
 
-               chunk_ptr = (void __user *)chunk_array[i];
+               chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
                if (copy_from_user(&user_chunk, chunk_ptr,
                                       sizeof(struct drm_amdgpu_cs_chunk))) {
                        ret = -EFAULT;
@@ -208,7 +209,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
                p->chunks[i].length_dw = user_chunk.length_dw;
 
                size = p->chunks[i].length_dw;
-               cdata = (void __user *)user_chunk.chunk_data;
+               cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
                p->chunks[i].user_ptr = cdata;
 
                p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
@@ -567,9 +568,24 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
                        if (r)
                                return r;
                }
+
+       }
+
+       r = amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync);
+
+       if (amdgpu_vm_debug && p->bo_list) {
+               /* Invalidate all BOs to test for userspace bugs */
+               for (i = 0; i < p->bo_list->num_entries; i++) {
+                       /* ignore duplicates */
+                       bo = p->bo_list->array[i].robj;
+                       if (!bo)
+                               continue;
+
+                       amdgpu_vm_bo_invalidate(adev, bo);
+               }
        }
 
-       return amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync);
+       return r;
 }
 
 static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
index 6068d8207d108413bdbe3b33035ca85b685e1ec3..901a460b2c5573bfe4ae1af93c16d8be95140420 100644 (file)
@@ -1022,7 +1022,7 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
  * amdgpu_switcheroo_set_state - set switcheroo state
  *
  * @pdev: pci dev pointer
- * @state: vga switcheroo state
+ * @state: vga_switcheroo state
  *
  * Callback for the switcheroo driver.  Suspends or resumes the
  * the asics before or after it is powered up using ACPI methods.
@@ -1657,11 +1657,21 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
        }
        drm_modeset_unlock_all(dev);
 
-       /* unpin the front buffers */
+       /* unpin the front buffers and cursors */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
                struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
                struct amdgpu_bo *robj;
 
+               if (amdgpu_crtc->cursor_bo) {
+                       struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+                       r = amdgpu_bo_reserve(aobj, false);
+                       if (r == 0) {
+                               amdgpu_bo_unpin(aobj);
+                               amdgpu_bo_unreserve(aobj);
+                       }
+               }
+
                if (rfb == NULL || rfb->obj == NULL) {
                        continue;
                }
@@ -1713,6 +1723,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 {
        struct drm_connector *connector;
        struct amdgpu_device *adev = dev->dev_private;
+       struct drm_crtc *crtc;
        int r;
 
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1746,6 +1757,24 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
        if (r)
                return r;
 
+       /* pin cursors */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+               if (amdgpu_crtc->cursor_bo) {
+                       struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+                       r = amdgpu_bo_reserve(aobj, false);
+                       if (r == 0) {
+                               r = amdgpu_bo_pin(aobj,
+                                                 AMDGPU_GEM_DOMAIN_VRAM,
+                                                 &amdgpu_crtc->cursor_addr);
+                               if (r != 0)
+                                       DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+                               amdgpu_bo_unreserve(aobj);
+                       }
+               }
+       }
+
        /* blat the mode back in */
        if (fbcon) {
                drm_helper_resume_force_mode(dev);
index e3d70772b53104f1f6a48020088d8391d10985b3..de116398fa4932f0c97f82450c63523b48b1ae68 100644 (file)
@@ -721,7 +721,7 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
  * an optional accurate timestamp of when query happened.
  *
  * \param dev Device to query.
- * \param crtc Crtc to query.
+ * \param pipe Crtc to query.
  * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
  * \param *vpos Location where vertical scanout position should be stored.
  * \param *hpos Location where horizontal scanout position should go.
@@ -744,8 +744,10 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
  * unknown small number of scanlines wrt. real scanout position.
  *
  */
-int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags,
-                              int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
+int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+                              unsigned int flags, int *vpos, int *hpos,
+                              ktime_t *stime, ktime_t *etime,
+                              const struct drm_display_mode *mode)
 {
        u32 vbl = 0, position = 0;
        int vbl_start, vbl_end, vtotal, ret = 0;
@@ -759,7 +761,7 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
        if (stime)
                *stime = ktime_get();
 
-       if (amdgpu_display_page_flip_get_scanoutpos(adev, crtc, &vbl, &position) == 0)
+       if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
                ret |= DRM_SCANOUTPOS_VALID;
 
        /* Get optional system timestamp after query. */
@@ -781,7 +783,7 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
        }
        else {
                /* No: Fake something reasonable which gives at least ok results. */
-               vbl_start = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
+               vbl_start = mode->crtc_vdisplay;
                vbl_end = 0;
        }
 
@@ -797,7 +799,7 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
 
        /* Inside "upper part" of vblank area? Apply corrective offset if so: */
        if (in_vbl && (*vpos >= vbl_start)) {
-               vtotal = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
+               vtotal = mode->crtc_vtotal;
                *vpos = *vpos - vtotal;
        }
 
@@ -819,8 +821,8 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
         * We only do this if DRM_CALLED_FROM_VBLIRQ.
         */
        if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
-               vbl_start = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
-               vtotal = adev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
+               vbl_start = mode->crtc_vdisplay;
+               vtotal = mode->crtc_vtotal;
 
                if (vbl_start - *vpos < vtotal / 100) {
                        *vpos -= vtotal;
index adb48353f2e1a10f169df7c2cd4fc6d6f8e2c23a..ef58774b242c9e20009dc6d07b608dabeff69ae7 100644 (file)
@@ -75,11 +75,13 @@ int amdgpu_bapm = -1;
 int amdgpu_deep_color = 0;
 int amdgpu_vm_size = 8;
 int amdgpu_vm_block_size = -1;
+int amdgpu_vm_fault_stop = 0;
+int amdgpu_vm_debug = 0;
 int amdgpu_exp_hw_support = 0;
-int amdgpu_enable_scheduler = 0;
+int amdgpu_enable_scheduler = 1;
 int amdgpu_sched_jobs = 16;
 int amdgpu_sched_hw_submission = 2;
-int amdgpu_enable_semaphores = 1;
+int amdgpu_enable_semaphores = 0;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -141,10 +143,16 @@ module_param_named(vm_size, amdgpu_vm_size, int, 0444);
 MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
 module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
 
+MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = print first, 2 = always)");
+module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
+
+MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
+module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
+
 MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
 module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
 
-MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable, 0 = disable ((default))");
+MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable (default), 0 = disable)");
 module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
 
 MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 16)");
@@ -153,7 +161,7 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
 MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
 module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 
-MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable (default), 0 = disable)");
+MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable, 0 = disable (default))");
 module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
 
 static struct pci_device_id pciidlist[] = {
@@ -242,11 +250,11 @@ static struct pci_device_id pciidlist[] = {
        {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 #endif
        /* topaz */
-       {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-       {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-       {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-       {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-       {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+       {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
        /* tonga */
        {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
        {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
index 8a122b1b77861028c123301726b8bb440537ad55..96290d9cddcab6ad8f0e9e8927a71ff97a093c80 100644 (file)
@@ -402,3 +402,19 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
                return true;
        return false;
 }
+
+void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
+{
+       struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
+       struct drm_fb_helper *fb_helper;
+       int ret;
+
+       if (!afbdev)
+               return;
+
+       fb_helper = &afbdev->helper;
+
+       ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+       if (ret)
+               DRM_DEBUG("failed to restore crtc mode\n");
+}
index b3fc26c59787f37acf3daff3d17917abf085e3be..fcad7e060938853c5f274a9fd2ddc3c438a1ab14 100644 (file)
@@ -628,8 +628,20 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
        init_waitqueue_head(&ring->fence_drv.fence_queue);
 
        if (amdgpu_enable_scheduler) {
+               long timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
+               if (timeout == 0) {
+                       /*
+                        * FIXME:
+                        * Delayed workqueue cannot use it directly,
+                        * so the scheduler will not use delayed workqueue if
+                        * MAX_SCHEDULE_TIMEOUT is set.
+                        * Currently keep it simple and silly.
+                        */
+                       timeout = MAX_SCHEDULE_TIMEOUT;
+               }
                r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
-                                  amdgpu_sched_hw_submission, ring->name);
+                                  amdgpu_sched_hw_submission,
+                                  timeout, ring->name);
                if (r) {
                        DRM_ERROR("Failed to create scheduler on ring %s.\n",
                                  ring->name);
index 8c735f544b6608b0f814dfe2396650ddf9c8a34b..dd85a0ae05c33bd0311536efa9c25b605d024238 100644 (file)
@@ -218,8 +218,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        break;
                case AMDGPU_HW_IP_DMA:
                        type = AMD_IP_BLOCK_TYPE_SDMA;
-                       ring_mask = adev->sdma[0].ring.ready ? 1 : 0;
-                       ring_mask |= ((adev->sdma[1].ring.ready ? 1 : 0) << 1);
+                       for (i = 0; i < adev->sdma.num_instances; i++)
+                               ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
                        ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
                        ib_size_alignment = 1;
                        break;
@@ -341,10 +341,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        fw_info.feature = 0;
                        break;
                case AMDGPU_INFO_FW_SDMA:
-                       if (info->query_fw.index >= 2)
+                       if (info->query_fw.index >= adev->sdma.num_instances)
                                return -EINVAL;
-                       fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
-                       fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
+                       fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version;
+                       fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version;
                        break;
                default:
                        return -EINVAL;
@@ -485,14 +485,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  * Outdated mess for old drm with Xorg being in charge (void function now).
  */
 /**
- * amdgpu_driver_firstopen_kms - drm callback for last close
+ * amdgpu_driver_lastclose_kms - drm callback for last close
  *
  * @dev: drm dev pointer
  *
- * Switch vga switcheroo state after last close (all asics).
+ * Switch vga_switcheroo state after last close (all asics).
  */
 void amdgpu_driver_lastclose_kms(struct drm_device *dev)
 {
+       struct amdgpu_device *adev = dev->dev_private;
+
+       amdgpu_fbdev_restore_mode(adev);
        vga_switcheroo_process_delayed_switch();
 }
 
@@ -600,36 +603,36 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev,
  * amdgpu_get_vblank_counter_kms - get frame count
  *
  * @dev: drm dev pointer
- * @crtc: crtc to get the frame count from
+ * @pipe: crtc to get the frame count from
  *
  * Gets the frame count on the requested crtc (all asics).
  * Returns frame count on success, -EINVAL on failure.
  */
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc)
+u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
 {
        struct amdgpu_device *adev = dev->dev_private;
 
-       if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
-               DRM_ERROR("Invalid crtc %d\n", crtc);
+       if (pipe >= adev->mode_info.num_crtc) {
+               DRM_ERROR("Invalid crtc %u\n", pipe);
                return -EINVAL;
        }
 
-       return amdgpu_display_vblank_get_counter(adev, crtc);
+       return amdgpu_display_vblank_get_counter(adev, pipe);
 }
 
 /**
  * amdgpu_enable_vblank_kms - enable vblank interrupt
  *
  * @dev: drm dev pointer
- * @crtc: crtc to enable vblank interrupt for
+ * @pipe: crtc to enable vblank interrupt for
  *
  * Enable the interrupt on the requested crtc (all asics).
  * Returns 0 on success, -EINVAL on failure.
  */
-int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc)
+int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
 {
        struct amdgpu_device *adev = dev->dev_private;
-       int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+       int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe);
 
        return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
 }
@@ -638,14 +641,14 @@ int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc)
  * amdgpu_disable_vblank_kms - disable vblank interrupt
  *
  * @dev: drm dev pointer
- * @crtc: crtc to disable vblank interrupt for
+ * @pipe: crtc to disable vblank interrupt for
  *
  * Disable the interrupt on the requested crtc (all asics).
  */
-void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc)
+void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
 {
        struct amdgpu_device *adev = dev->dev_private;
-       int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+       int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe);
 
        amdgpu_irq_put(adev, &adev->crtc_irq, idx);
 }
@@ -663,26 +666,26 @@ void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc)
  * scanout position.  (all asics).
  * Returns postive status flags on success, negative error on failure.
  */
-int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
                                    int *max_error,
                                    struct timeval *vblank_time,
                                    unsigned flags)
 {
-       struct drm_crtc *drmcrtc;
+       struct drm_crtc *crtc;
        struct amdgpu_device *adev = dev->dev_private;
 
-       if (crtc < 0 || crtc >= dev->num_crtcs) {
-               DRM_ERROR("Invalid crtc %d\n", crtc);
+       if (pipe >= dev->num_crtcs) {
+               DRM_ERROR("Invalid crtc %u\n", pipe);
                return -EINVAL;
        }
 
        /* Get associated drm_crtc: */
-       drmcrtc = &adev->mode_info.crtcs[crtc]->base;
+       crtc = &adev->mode_info.crtcs[pipe]->base;
 
        /* Helper routine in DRM core does all the work: */
-       return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
+       return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
-                                                    drmcrtc, &drmcrtc->hwmode);
+                                                    &crtc->hwmode);
 }
 
 const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
index 64efe5b52e6500f840f0ad7edbdc6a9f64db6f66..b62c1710cab6b0fc00e33013ad741c97f9583ed2 100644 (file)
@@ -373,6 +373,10 @@ struct amdgpu_crtc {
        uint32_t crtc_offset;
        struct drm_gem_object *cursor_bo;
        uint64_t cursor_addr;
+       int cursor_x;
+       int cursor_y;
+       int cursor_hot_x;
+       int cursor_hot_y;
        int cursor_width;
        int cursor_height;
        int max_cursor_width;
@@ -540,10 +544,10 @@ bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector, bool use_aux);
 
 void amdgpu_encoder_set_active_device(struct drm_encoder *encoder);
 
-int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
-                                     unsigned int flags,
-                                     int *vpos, int *hpos, ktime_t *stime,
-                                     ktime_t *etime);
+int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+                              unsigned int flags, int *vpos, int *hpos,
+                              ktime_t *stime, ktime_t *etime,
+                              const struct drm_display_mode *mode);
 
 int amdgpu_framebuffer_init(struct drm_device *dev,
                             struct amdgpu_framebuffer *rfb,
@@ -567,6 +571,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev);
 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
 int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
 bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);
+void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev);
 
 void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev);
 
index 1a7708f365f37923e747dda4b37e5bd3f0ceb203..0d524384ff79c3b49d2dbaa1489d8cbff231d80a 100644 (file)
@@ -132,6 +132,8 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
                placements[c].fpfn = 0;
                placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                        TTM_PL_FLAG_VRAM;
+               if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
+                       placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
        }
 
        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
index 30dce235ddeb4e4f3660338bf5a14d6afa3864c5..b13a74b273a690959dc79b9944a73c3a83fdf9a2 100644 (file)
@@ -540,8 +540,8 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
 static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
 static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
 static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
-static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma[0].ring);
-static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma[1].ring);
+static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma.instance[0].ring);
+static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma.instance[1].ring);
 static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
 static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
 static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);
index 961d7265c286524956e1100b14f084eb6e5341b0..76ecbaf72a2e81bf70d12f63c1ca9c7517fdf3e6 100644 (file)
@@ -111,7 +111,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
                      __entry->offset, __entry->flags)
 );
 
-TRACE_EVENT(amdgpu_vm_bo_update,
+DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
            TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
            TP_ARGS(mapping),
            TP_STRUCT__entry(
@@ -129,6 +129,16 @@ TRACE_EVENT(amdgpu_vm_bo_update,
                      __entry->soffset, __entry->eoffset, __entry->flags)
 );
 
+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_update,
+           TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+           TP_ARGS(mapping)
+);
+
+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
+           TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+           TP_ARGS(mapping)
+);
+
 TRACE_EVENT(amdgpu_vm_set_page,
            TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
                     uint32_t incr, uint32_t flags),
index 364cbe97533298d45e9087d322c35f7c3923b52a..a089e69e9927610666ff9fb26c2be008335a473a 100644 (file)
@@ -1072,6 +1072,11 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
        spin_lock(&glob->lru_lock);
        ret = drm_mm_dump_table(m, mm);
        spin_unlock(&glob->lru_lock);
+       if (ttm_pl == TTM_PL_VRAM)
+               seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n",
+                          adev->mman.bdev.man[ttm_pl].size,
+                          atomic64_read(&adev->vram_usage) >> 20,
+                          atomic64_read(&adev->vram_vis_usage) >> 20);
        return ret;
 }
 
index 1e14531353e05ec7aadd69ea9d6e019310a25682..644fd9b8591f2104ff1ed18c62de70f0ba543248 100644 (file)
@@ -147,8 +147,10 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 
        /* check if the id is still valid */
        if (vm_id->id && vm_id->last_id_use &&
-           vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
+           vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) {
+               trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
                return 0;
+       }
 
        /* we definately need to flush */
        vm_id->pd_gpu_addr = ~0ll;
@@ -455,8 +457,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                return -ENOMEM;
 
        r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
-       if (r)
+       if (r) {
+               kfree(ib);
                return r;
+       }
        ib->length_dw = 0;
 
        /* walk over the address space and update the page directory */
@@ -850,6 +854,14 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        return r;
        }
 
+       if (trace_amdgpu_vm_bo_mapping_enabled()) {
+               list_for_each_entry(mapping, &bo_va->valids, list)
+                       trace_amdgpu_vm_bo_mapping(mapping);
+
+               list_for_each_entry(mapping, &bo_va->invalids, list)
+                       trace_amdgpu_vm_bo_mapping(mapping);
+       }
+
        spin_lock(&vm->status_lock);
        list_splice_init(&bo_va->invalids, &bo_va->valids);
        list_del_init(&bo_va->vm_status);
index a0346a90d805062eb62dad9812f075b7a585c6ce..1b50e6c13fb3fc2b8136581088ae07e40e8739c3 100644 (file)
@@ -685,6 +685,27 @@ static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
        }
 }
 
+static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint64_t val64;
+       uint8_t attr = U8((*ptr)++);
+       uint32_t dst, src;
+       SDEBUG("   src1: ");
+       dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+       SDEBUG("   src2: ");
+       src = atom_get_src(ctx, attr, ptr);
+       if (src != 0) {
+               val64 = dst;
+               val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
+               do_div(val64, src);
+               ctx->ctx->divmul[0] = lower_32_bits(val64);
+               ctx->ctx->divmul[1] = upper_32_bits(val64);
+       } else {
+               ctx->ctx->divmul[0] = 0;
+               ctx->ctx->divmul[1] = 0;
+       }
+}
+
 static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
 {
        /* functionally, a nop */
@@ -788,6 +809,20 @@ static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
        ctx->ctx->divmul[0] = dst * src;
 }
 
+static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint64_t val64;
+       uint8_t attr = U8((*ptr)++);
+       uint32_t dst, src;
+       SDEBUG("   src1: ");
+       dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+       SDEBUG("   src2: ");
+       src = atom_get_src(ctx, attr, ptr);
+       val64 = (uint64_t)dst * (uint64_t)src;
+       ctx->ctx->divmul[0] = lower_32_bits(val64);
+       ctx->ctx->divmul[1] = upper_32_bits(val64);
+}
+
 static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
 {
        /* nothing */
@@ -1022,7 +1057,15 @@ static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
 
 static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
 {
-       printk(KERN_INFO "unimplemented!\n");
+       uint8_t val = U8((*ptr)++);
+       SDEBUG("DEBUG output: 0x%02X\n", val);
+}
+
+static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
+{
+       uint16_t val = U16(*ptr);
+       (*ptr) += val + 2;
+       SDEBUG("PROCESSDS output: 0x%02X\n", val);
 }
 
 static struct {
@@ -1151,7 +1194,13 @@ static struct {
        atom_op_shr, ATOM_ARG_FB}, {
        atom_op_shr, ATOM_ARG_PLL}, {
        atom_op_shr, ATOM_ARG_MC}, {
-atom_op_debug, 0},};
+       atom_op_debug, 0}, {
+       atom_op_processds, 0}, {
+       atom_op_mul32, ATOM_ARG_PS}, {
+       atom_op_mul32, ATOM_ARG_WS}, {
+       atom_op_div32, ATOM_ARG_PS}, {
+       atom_op_div32, ATOM_ARG_WS},
+};
 
 static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
 {
index 09d0f8230708a4ff08947600d6913f9a5f9c5b0d..fece8f45dc7a3308c81f22e29fbd8b2a5e1bff70 100644 (file)
@@ -60,7 +60,7 @@
 #define ATOM_CT_PS_MASK                0x7F
 #define ATOM_CT_CODE_PTR       6
 
-#define ATOM_OP_CNT            123
+#define ATOM_OP_CNT            127
 #define ATOM_OP_EOT            91
 
 #define ATOM_CASE_MAGIC                0x63
index 82e8d073051759f7b0307b7675282a8dfea280e8..a1a35a5df8e71357eea132019d3500a35a89fce4 100644 (file)
@@ -6185,6 +6185,11 @@ static int ci_dpm_late_init(void *handle)
        if (!amdgpu_dpm)
                return 0;
 
+       /* init the sysfs and debugfs files late */
+       ret = amdgpu_pm_sysfs_init(adev);
+       if (ret)
+               return ret;
+
        ret = ci_set_temperature_range(adev);
        if (ret)
                return ret;
@@ -6232,9 +6237,6 @@ static int ci_dpm_sw_init(void *handle)
        adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
        if (amdgpu_dpm == 1)
                amdgpu_pm_print_power_states(adev);
-       ret = amdgpu_pm_sysfs_init(adev);
-       if (ret)
-               goto dpm_failed;
        mutex_unlock(&adev->pm.mutex);
        DRM_INFO("amdgpu: dpm initialized\n");
 
index 4b6ce74753cded5179b17eaf698ddd16766760b1..484710cfdf8243d563afe908c2b9c9884879f971 100644 (file)
@@ -1567,6 +1567,9 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
        int ret, i;
        u16 tmp16;
 
+       if (pci_is_root_bus(adev->pdev->bus))
+               return;
+
        if (amdgpu_pcie_gen2 == 0)
                return;
 
index 9ea9de457da373f702b401633ca4f7113748cbf6..814598e76c98d5b44432517ae0e5f19c0e1bb37d 100644 (file)
@@ -96,7 +96,7 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
 {
        const char *chip_name;
        char fw_name[30];
-       int err, i;
+       int err = 0, i;
 
        DRM_DEBUG("\n");
 
@@ -119,24 +119,24 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
        default: BUG();
        }
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                if (i == 0)
                        snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
                else
                        snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
-               err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
+               err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
                if (err)
                        goto out;
-               err = amdgpu_ucode_validate(adev->sdma[i].fw);
+               err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
        }
 out:
        if (err) {
                printk(KERN_ERR
                       "cik_sdma: Failed to load firmware \"%s\"\n",
                       fw_name);
-               for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-                       release_firmware(adev->sdma[i].fw);
-                       adev->sdma[i].fw = NULL;
+               for (i = 0; i < adev->sdma.num_instances; i++) {
+                       release_firmware(adev->sdma.instance[i].fw);
+                       adev->sdma.instance[i].fw = NULL;
                }
        }
        return err;
@@ -168,7 +168,7 @@ static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
 static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;
+       u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 
        return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
 }
@@ -183,14 +183,14 @@ static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;
+       u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 
        WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
 }
 
 static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-       struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
        int i;
 
        for (i = 0; i < count; i++)
@@ -248,7 +248,7 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
                          SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
        u32 ref_and_mask;
 
-       if (ring == &ring->adev->sdma[0].ring)
+       if (ring == &ring->adev->sdma.instance[0].ring)
                ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
        else
                ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
@@ -327,8 +327,8 @@ static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
  */
 static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
-       struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
+       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
+       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl;
        int i;
 
@@ -336,7 +336,7 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
            (adev->mman.buffer_funcs_ring == sdma1))
                amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -376,7 +376,7 @@ static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
                cik_sdma_rlc_stop(adev);
        }
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
                if (enable)
                        me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
@@ -402,8 +402,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
        u32 wb_offset;
        int i, j, r;
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-               ring = &adev->sdma[i].ring;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
                wb_offset = (ring->rptr_offs * 4);
 
                mutex_lock(&adev->srbm_mutex);
@@ -502,26 +502,25 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
        u32 fw_size;
        int i, j;
 
-       if (!adev->sdma[0].fw || !adev->sdma[1].fw)
-               return -EINVAL;
-
        /* halt the MEs */
        cik_sdma_enable(adev, false);
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               if (!adev->sdma.instance[i].fw)
+                       return -EINVAL;
+               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
                amdgpu_ucode_print_sdma_hdr(&hdr->header);
                fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
-               adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-               adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
-               if (adev->sdma[i].feature_version >= 20)
-                       adev->sdma[i].burst_nop = true;
+               adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+               adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+               if (adev->sdma.instance[i].feature_version >= 20)
+                       adev->sdma.instance[i].burst_nop = true;
                fw_data = (const __le32 *)
-                       (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       (adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
                for (j = 0; j < fw_size; j++)
                        WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
-               WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
+               WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
        }
 
        return 0;
@@ -830,7 +829,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
 {
-       struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
        u32 pad_count;
        int i;
 
@@ -934,6 +933,8 @@ static int cik_sdma_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       adev->sdma.num_instances = SDMA_MAX_INSTANCE;
+
        cik_sdma_set_ring_funcs(adev);
        cik_sdma_set_irq_funcs(adev);
        cik_sdma_set_buffer_funcs(adev);
@@ -946,7 +947,7 @@ static int cik_sdma_sw_init(void *handle)
 {
        struct amdgpu_ring *ring;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       int r;
+       int r, i;
 
        r = cik_sdma_init_microcode(adev);
        if (r) {
@@ -955,43 +956,33 @@ static int cik_sdma_sw_init(void *handle)
        }
 
        /* SDMA trap event */
-       r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
+       r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
+       r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
-       if (r)
-               return r;
-
-       ring = &adev->sdma[0].ring;
-       ring->ring_obj = NULL;
-
-       ring = &adev->sdma[1].ring;
-       ring->ring_obj = NULL;
-
-       ring = &adev->sdma[0].ring;
-       sprintf(ring->name, "sdma0");
-       r = amdgpu_ring_init(adev, ring, 256 * 1024,
-                            SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
-                            &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
-                            AMDGPU_RING_TYPE_SDMA);
+       r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
-       ring = &adev->sdma[1].ring;
-       sprintf(ring->name, "sdma1");
-       r = amdgpu_ring_init(adev, ring, 256 * 1024,
-                            SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
-                            &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
-                            AMDGPU_RING_TYPE_SDMA);
-       if (r)
-               return r;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
+               ring->ring_obj = NULL;
+               sprintf(ring->name, "sdma%d", i);
+               r = amdgpu_ring_init(adev, ring, 256 * 1024,
+                                    SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
+                                    &adev->sdma.trap_irq,
+                                    (i == 0) ?
+                                    AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
+                                    AMDGPU_RING_TYPE_SDMA);
+               if (r)
+                       return r;
+       }
 
        return r;
 }
@@ -999,9 +990,10 @@ static int cik_sdma_sw_init(void *handle)
 static int cik_sdma_sw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int i;
 
-       amdgpu_ring_fini(&adev->sdma[0].ring);
-       amdgpu_ring_fini(&adev->sdma[1].ring);
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
        return 0;
 }
@@ -1078,7 +1070,7 @@ static void cik_sdma_print_status(void *handle)
        dev_info(adev->dev, "CIK SDMA registers\n");
        dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
                 RREG32(mmSRBM_STATUS2));
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
                         i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_ME_CNTL=0x%08X\n",
@@ -1223,7 +1215,7 @@ static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
        case 0:
                switch (queue_id) {
                case 0:
-                       amdgpu_fence_process(&adev->sdma[0].ring);
+                       amdgpu_fence_process(&adev->sdma.instance[0].ring);
                        break;
                case 1:
                        /* XXX compute */
@@ -1236,7 +1228,7 @@ static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
        case 1:
                switch (queue_id) {
                case 0:
-                       amdgpu_fence_process(&adev->sdma[1].ring);
+                       amdgpu_fence_process(&adev->sdma.instance[1].ring);
                        break;
                case 1:
                        /* XXX compute */
@@ -1334,8 +1326,10 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->sdma[0].ring.funcs = &cik_sdma_ring_funcs;
-       adev->sdma[1].ring.funcs = &cik_sdma_ring_funcs;
+       int i;
+
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
@@ -1349,9 +1343,9 @@ static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
 
 static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
 {
-       adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
-       adev->sdma_trap_irq.funcs = &cik_sdma_trap_irq_funcs;
-       adev->sdma_illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
+       adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+       adev->sdma.trap_irq.funcs = &cik_sdma_trap_irq_funcs;
+       adev->sdma.illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
 }
 
 /**
@@ -1416,7 +1410,7 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
 {
        if (adev->mman.buffer_funcs == NULL) {
                adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
-               adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
+               adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
        }
 }
 
@@ -1431,7 +1425,7 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
        if (adev->vm_manager.vm_pte_funcs == NULL) {
                adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
-               adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+               adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
                adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
        }
 }
index 44fa96ad47099b765ac81e5c439766a8f9849392..2e3373ed4c942d9fc753851d50adb9a3034ebff8 100644 (file)
@@ -596,6 +596,12 @@ static int cz_dpm_late_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (amdgpu_dpm) {
+               int ret;
+               /* init the sysfs and debugfs files late */
+               ret = amdgpu_pm_sysfs_init(adev);
+               if (ret)
+                       return ret;
+
                /* powerdown unused blocks for now */
                cz_dpm_powergate_uvd(adev, true);
                cz_dpm_powergate_vce(adev, true);
@@ -632,10 +638,6 @@ static int cz_dpm_sw_init(void *handle)
        if (amdgpu_dpm == 1)
                amdgpu_pm_print_power_states(adev);
 
-       ret = amdgpu_pm_sysfs_init(adev);
-       if (ret)
-               goto dpm_init_failed;
-
        mutex_unlock(&adev->pm.mutex);
        DRM_INFO("amdgpu: dpm initialized\n");
 
index e4d101b1252a47eaf7a2c7e35c2d8d83f737d762..37073930e2c9f92f0024059b998231bf2ad7272c 100644 (file)
@@ -2499,26 +2499,19 @@ static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
        struct amdgpu_device *adev = crtc->dev->dev_private;
        u32 tmp;
 
+       WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(amdgpu_crtc->cursor_addr));
+       WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              lower_32_bits(amdgpu_crtc->cursor_addr));
+
        tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
        WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }
 
-static void dce_v10_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
-                             uint64_t gpu_addr)
-{
-       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
-
-       WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
-              upper_32_bits(gpu_addr));
-       WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
-              lower_32_bits(gpu_addr));
-}
-
-static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
-                                    int x, int y)
+static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
+                                       int x, int y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = crtc->dev->dev_private;
@@ -2538,26 +2531,40 @@ static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
                y = 0;
        }
 
-       dce_v10_0_lock_cursor(crtc, true);
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
        WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
               ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-       dce_v10_0_lock_cursor(crtc, false);
+
+       amdgpu_crtc->cursor_x = x;
+       amdgpu_crtc->cursor_y = y;
 
        return 0;
 }
 
-static int dce_v10_0_crtc_cursor_set(struct drm_crtc *crtc,
-                                   struct drm_file *file_priv,
-                                   uint32_t handle,
-                                   uint32_t width,
-                                   uint32_t height)
+static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
+                                     int x, int y)
+{
+       int ret;
+
+       dce_v10_0_lock_cursor(crtc, true);
+       ret = dce_v10_0_cursor_move_locked(crtc, x, y);
+       dce_v10_0_lock_cursor(crtc, false);
+
+       return ret;
+}
+
+static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
+                                     struct drm_file *file_priv,
+                                     uint32_t handle,
+                                     uint32_t width,
+                                     uint32_t height,
+                                     int32_t hot_x,
+                                     int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_gem_object *obj;
-       struct amdgpu_bo *robj;
-       uint64_t gpu_addr;
+       struct amdgpu_bo *aobj;
        int ret;
 
        if (!handle) {
@@ -2579,41 +2586,71 @@ static int dce_v10_0_crtc_cursor_set(struct drm_crtc *crtc,
                return -ENOENT;
        }
 
-       robj = gem_to_amdgpu_bo(obj);
-       ret = amdgpu_bo_reserve(robj, false);
-       if (unlikely(ret != 0))
-               goto fail;
-       ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM,
-                                      0, 0, &gpu_addr);
-       amdgpu_bo_unreserve(robj);
-       if (ret)
-               goto fail;
+       aobj = gem_to_amdgpu_bo(obj);
+       ret = amdgpu_bo_reserve(aobj, false);
+       if (ret != 0) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
+       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+       amdgpu_bo_unreserve(aobj);
+       if (ret) {
+               DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
 
        amdgpu_crtc->cursor_width = width;
        amdgpu_crtc->cursor_height = height;
 
        dce_v10_0_lock_cursor(crtc, true);
-       dce_v10_0_set_cursor(crtc, obj, gpu_addr);
+
+       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+           hot_y != amdgpu_crtc->cursor_hot_y) {
+               int x, y;
+
+               x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
+               y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
+
+               dce_v10_0_cursor_move_locked(crtc, x, y);
+
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
+       }
+
        dce_v10_0_show_cursor(crtc);
        dce_v10_0_lock_cursor(crtc, false);
 
 unpin:
        if (amdgpu_crtc->cursor_bo) {
-               robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-               ret = amdgpu_bo_reserve(robj, false);
+               struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+               ret = amdgpu_bo_reserve(aobj, false);
                if (likely(ret == 0)) {
-                       amdgpu_bo_unpin(robj);
-                       amdgpu_bo_unreserve(robj);
+                       amdgpu_bo_unpin(aobj);
+                       amdgpu_bo_unreserve(aobj);
                }
                drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
        }
 
        amdgpu_crtc->cursor_bo = obj;
        return 0;
-fail:
-       drm_gem_object_unreference_unlocked(obj);
+}
 
-       return ret;
+static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       if (amdgpu_crtc->cursor_bo) {
+               dce_v10_0_lock_cursor(crtc, true);
+
+               dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
+                                            amdgpu_crtc->cursor_y);
+
+               dce_v10_0_show_cursor(crtc);
+
+               dce_v10_0_lock_cursor(crtc, false);
+       }
 }
 
 static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
@@ -2641,7 +2678,7 @@ static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
 }
 
 static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
-       .cursor_set = dce_v10_0_crtc_cursor_set,
+       .cursor_set2 = dce_v10_0_crtc_cursor_set2,
        .cursor_move = dce_v10_0_crtc_cursor_move,
        .gamma_set = dce_v10_0_crtc_gamma_set,
        .set_config = amdgpu_crtc_set_config,
@@ -2774,6 +2811,7 @@ static int dce_v10_0_crtc_mode_set(struct drm_crtc *crtc,
        dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
        amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
        amdgpu_atombios_crtc_scaler_setup(crtc);
+       dce_v10_0_cursor_reset(crtc);
        /* update the hw version fpr dpm */
        amdgpu_crtc->hw_mode = *adjusted_mode;
 
@@ -3267,37 +3305,20 @@ static int dce_v10_0_set_pageflip_irq_state(struct amdgpu_device *adev,
                                            unsigned type,
                                            enum amdgpu_interrupt_state state)
 {
-       u32 reg, reg_block;
-       /* now deal with page flip IRQ */
-       switch (type) {
-               case AMDGPU_PAGEFLIP_IRQ_D1:
-                       reg_block = CRTC0_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D2:
-                       reg_block = CRTC1_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D3:
-                       reg_block = CRTC2_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D4:
-                       reg_block = CRTC3_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D5:
-                       reg_block = CRTC4_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D6:
-                       reg_block = CRTC5_REGISTER_OFFSET;
-                       break;
-               default:
-                       DRM_ERROR("invalid pageflip crtc %d\n", type);
-                       return -EINVAL;
+       u32 reg;
+
+       if (type >= adev->mode_info.num_crtc) {
+               DRM_ERROR("invalid pageflip crtc %d\n", type);
+               return -EINVAL;
        }
 
-       reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block);
+       reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
        if (state == AMDGPU_IRQ_STATE_DISABLE)
-               WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+               WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
+                      reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
        else
-               WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+               WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
+                      reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
 
        return 0;
 }
@@ -3306,7 +3327,6 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
                                  struct amdgpu_irq_src *source,
                                  struct amdgpu_iv_entry *entry)
 {
-       int reg_block;
        unsigned long flags;
        unsigned crtc_id;
        struct amdgpu_crtc *amdgpu_crtc;
@@ -3315,33 +3335,15 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
        crtc_id = (entry->src_id - 8) >> 1;
        amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 
-       /* ack the interrupt */
-       switch(crtc_id){
-               case AMDGPU_PAGEFLIP_IRQ_D1:
-                       reg_block = CRTC0_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D2:
-                       reg_block = CRTC1_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D3:
-                       reg_block = CRTC2_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D4:
-                       reg_block = CRTC3_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D5:
-                       reg_block = CRTC4_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D6:
-                       reg_block = CRTC5_REGISTER_OFFSET;
-                       break;
-               default:
-                       DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
-                       return -EINVAL;
+       if (crtc_id >= adev->mode_info.num_crtc) {
+               DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+               return -EINVAL;
        }
 
-       if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
-               WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
+       if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
+           GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
+               WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
+                      GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
 
        /* IRQ could occur when in initial stage */
        if (amdgpu_crtc == NULL)
index 6411e824467164831eef8af634051f95b8faba69..5be83a3f019210fa87ee3840d5c1eb74f6cc3dce 100644 (file)
@@ -2476,26 +2476,19 @@ static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
        struct amdgpu_device *adev = crtc->dev->dev_private;
        u32 tmp;
 
+       WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(amdgpu_crtc->cursor_addr));
+       WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              lower_32_bits(amdgpu_crtc->cursor_addr));
+
        tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
        WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }
 
-static void dce_v11_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
-                             uint64_t gpu_addr)
-{
-       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
-
-       WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
-              upper_32_bits(gpu_addr));
-       WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
-              lower_32_bits(gpu_addr));
-}
-
-static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
-                                    int x, int y)
+static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
+                                       int x, int y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = crtc->dev->dev_private;
@@ -2515,26 +2508,40 @@ static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
                y = 0;
        }
 
-       dce_v11_0_lock_cursor(crtc, true);
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
        WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
               ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-       dce_v11_0_lock_cursor(crtc, false);
+
+       amdgpu_crtc->cursor_x = x;
+       amdgpu_crtc->cursor_y = y;
 
        return 0;
 }
 
-static int dce_v11_0_crtc_cursor_set(struct drm_crtc *crtc,
-                                   struct drm_file *file_priv,
-                                   uint32_t handle,
-                                   uint32_t width,
-                                   uint32_t height)
+static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
+                                     int x, int y)
+{
+       int ret;
+
+       dce_v11_0_lock_cursor(crtc, true);
+       ret = dce_v11_0_cursor_move_locked(crtc, x, y);
+       dce_v11_0_lock_cursor(crtc, false);
+
+       return ret;
+}
+
+static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
+                                     struct drm_file *file_priv,
+                                     uint32_t handle,
+                                     uint32_t width,
+                                     uint32_t height,
+                                     int32_t hot_x,
+                                     int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_gem_object *obj;
-       struct amdgpu_bo *robj;
-       uint64_t gpu_addr;
+       struct amdgpu_bo *aobj;
        int ret;
 
        if (!handle) {
@@ -2556,41 +2563,71 @@ static int dce_v11_0_crtc_cursor_set(struct drm_crtc *crtc,
                return -ENOENT;
        }
 
-       robj = gem_to_amdgpu_bo(obj);
-       ret = amdgpu_bo_reserve(robj, false);
-       if (unlikely(ret != 0))
-               goto fail;
-       ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM,
-                                      0, 0, &gpu_addr);
-       amdgpu_bo_unreserve(robj);
-       if (ret)
-               goto fail;
+       aobj = gem_to_amdgpu_bo(obj);
+       ret = amdgpu_bo_reserve(aobj, false);
+       if (ret != 0) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
+       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+       amdgpu_bo_unreserve(aobj);
+       if (ret) {
+               DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
 
        amdgpu_crtc->cursor_width = width;
        amdgpu_crtc->cursor_height = height;
 
        dce_v11_0_lock_cursor(crtc, true);
-       dce_v11_0_set_cursor(crtc, obj, gpu_addr);
+
+       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+           hot_y != amdgpu_crtc->cursor_hot_y) {
+               int x, y;
+
+               x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
+               y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
+
+               dce_v11_0_cursor_move_locked(crtc, x, y);
+
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
+       }
+
        dce_v11_0_show_cursor(crtc);
        dce_v11_0_lock_cursor(crtc, false);
 
 unpin:
        if (amdgpu_crtc->cursor_bo) {
-               robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-               ret = amdgpu_bo_reserve(robj, false);
+               struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+               ret = amdgpu_bo_reserve(aobj, false);
                if (likely(ret == 0)) {
-                       amdgpu_bo_unpin(robj);
-                       amdgpu_bo_unreserve(robj);
+                       amdgpu_bo_unpin(aobj);
+                       amdgpu_bo_unreserve(aobj);
                }
                drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
        }
 
        amdgpu_crtc->cursor_bo = obj;
        return 0;
-fail:
-       drm_gem_object_unreference_unlocked(obj);
+}
 
-       return ret;
+static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       if (amdgpu_crtc->cursor_bo) {
+               dce_v11_0_lock_cursor(crtc, true);
+
+               dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
+                                            amdgpu_crtc->cursor_y);
+
+               dce_v11_0_show_cursor(crtc);
+
+               dce_v11_0_lock_cursor(crtc, false);
+       }
 }
 
 static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
@@ -2618,7 +2655,7 @@ static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
 }
 
 static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
-       .cursor_set = dce_v11_0_crtc_cursor_set,
+       .cursor_set2 = dce_v11_0_crtc_cursor_set2,
        .cursor_move = dce_v11_0_crtc_cursor_move,
        .gamma_set = dce_v11_0_crtc_gamma_set,
        .set_config = amdgpu_crtc_set_config,
@@ -2751,6 +2788,7 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
        dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
        amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
        amdgpu_atombios_crtc_scaler_setup(crtc);
+       dce_v11_0_cursor_reset(crtc);
        /* update the hw version fpr dpm */
        amdgpu_crtc->hw_mode = *adjusted_mode;
 
@@ -2888,7 +2926,7 @@ static int dce_v11_0_early_init(void *handle)
 
        switch (adev->asic_type) {
        case CHIP_CARRIZO:
-               adev->mode_info.num_crtc = 4;
+               adev->mode_info.num_crtc = 3;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                break;
@@ -3243,37 +3281,20 @@ static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev,
                                            unsigned type,
                                            enum amdgpu_interrupt_state state)
 {
-       u32 reg, reg_block;
-       /* now deal with page flip IRQ */
-       switch (type) {
-               case AMDGPU_PAGEFLIP_IRQ_D1:
-                       reg_block = CRTC0_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D2:
-                       reg_block = CRTC1_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D3:
-                       reg_block = CRTC2_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D4:
-                       reg_block = CRTC3_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D5:
-                       reg_block = CRTC4_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D6:
-                       reg_block = CRTC5_REGISTER_OFFSET;
-                       break;
-               default:
-                       DRM_ERROR("invalid pageflip crtc %d\n", type);
-                       return -EINVAL;
+       u32 reg;
+
+       if (type >= adev->mode_info.num_crtc) {
+               DRM_ERROR("invalid pageflip crtc %d\n", type);
+               return -EINVAL;
        }
 
-       reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block);
+       reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
        if (state == AMDGPU_IRQ_STATE_DISABLE)
-               WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+               WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
+                      reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
        else
-               WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+               WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
+                      reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
 
        return 0;
 }
@@ -3282,7 +3303,6 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
                                  struct amdgpu_irq_src *source,
                                  struct amdgpu_iv_entry *entry)
 {
-       int reg_block;
        unsigned long flags;
        unsigned crtc_id;
        struct amdgpu_crtc *amdgpu_crtc;
@@ -3291,33 +3311,15 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
        crtc_id = (entry->src_id - 8) >> 1;
        amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 
-       /* ack the interrupt */
-       switch(crtc_id){
-               case AMDGPU_PAGEFLIP_IRQ_D1:
-                       reg_block = CRTC0_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D2:
-                       reg_block = CRTC1_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D3:
-                       reg_block = CRTC2_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D4:
-                       reg_block = CRTC3_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D5:
-                       reg_block = CRTC4_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D6:
-                       reg_block = CRTC5_REGISTER_OFFSET;
-                       break;
-               default:
-                       DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
-                       return -EINVAL;
+       if (crtc_id >= adev->mode_info.num_crtc) {
+               DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+               return -EINVAL;
        }
 
-       if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
-               WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
+       if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
+           GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
+               WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
+                      GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
 
        /* IRQ could occur when in initial stage */
        if(amdgpu_crtc == NULL)
index c86911c2ea2a896f414473f798d782e9c08518cf..d784fb43efc2d4c54f1d653a09b6fccdb2ddd958 100644 (file)
@@ -2411,26 +2411,19 @@ static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = crtc->dev->dev_private;
 
+       WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+              upper_32_bits(amdgpu_crtc->cursor_addr));
+       WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+              lower_32_bits(amdgpu_crtc->cursor_addr));
+
        WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
                   CUR_CONTROL__CURSOR_EN_MASK |
                   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
                   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
 }
 
-static void dce_v8_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
-                             uint64_t gpu_addr)
-{
-       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
-
-       WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
-              upper_32_bits(gpu_addr));
-       WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
-              gpu_addr & 0xffffffff);
-}
-
-static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
-                                    int x, int y)
+static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
+                                      int x, int y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = crtc->dev->dev_private;
@@ -2450,26 +2443,40 @@ static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
                y = 0;
        }
 
-       dce_v8_0_lock_cursor(crtc, true);
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
        WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
               ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-       dce_v8_0_lock_cursor(crtc, false);
+
+       amdgpu_crtc->cursor_x = x;
+       amdgpu_crtc->cursor_y = y;
 
        return 0;
 }
 
-static int dce_v8_0_crtc_cursor_set(struct drm_crtc *crtc,
-                                   struct drm_file *file_priv,
-                                   uint32_t handle,
-                                   uint32_t width,
-                                   uint32_t height)
+static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
+                                    int x, int y)
+{
+       int ret;
+
+       dce_v8_0_lock_cursor(crtc, true);
+       ret = dce_v8_0_cursor_move_locked(crtc, x, y);
+       dce_v8_0_lock_cursor(crtc, false);
+
+       return ret;
+}
+
+static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
+                                    struct drm_file *file_priv,
+                                    uint32_t handle,
+                                    uint32_t width,
+                                    uint32_t height,
+                                    int32_t hot_x,
+                                    int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_gem_object *obj;
-       struct amdgpu_bo *robj;
-       uint64_t gpu_addr;
+       struct amdgpu_bo *aobj;
        int ret;
 
        if (!handle) {
@@ -2491,41 +2498,71 @@ static int dce_v8_0_crtc_cursor_set(struct drm_crtc *crtc,
                return -ENOENT;
        }
 
-       robj = gem_to_amdgpu_bo(obj);
-       ret = amdgpu_bo_reserve(robj, false);
-       if (unlikely(ret != 0))
-               goto fail;
-       ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM,
-                                      0, 0, &gpu_addr);
-       amdgpu_bo_unreserve(robj);
-       if (ret)
-               goto fail;
+       aobj = gem_to_amdgpu_bo(obj);
+       ret = amdgpu_bo_reserve(aobj, false);
+       if (ret != 0) {
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
+
+       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+       amdgpu_bo_unreserve(aobj);
+       if (ret) {
+               DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+               drm_gem_object_unreference_unlocked(obj);
+               return ret;
+       }
 
        amdgpu_crtc->cursor_width = width;
        amdgpu_crtc->cursor_height = height;
 
        dce_v8_0_lock_cursor(crtc, true);
-       dce_v8_0_set_cursor(crtc, obj, gpu_addr);
+
+       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+           hot_y != amdgpu_crtc->cursor_hot_y) {
+               int x, y;
+
+               x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
+               y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
+
+               dce_v8_0_cursor_move_locked(crtc, x, y);
+
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
+       }
+
        dce_v8_0_show_cursor(crtc);
        dce_v8_0_lock_cursor(crtc, false);
 
 unpin:
        if (amdgpu_crtc->cursor_bo) {
-               robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-               ret = amdgpu_bo_reserve(robj, false);
+               struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+               ret = amdgpu_bo_reserve(aobj, false);
                if (likely(ret == 0)) {
-                       amdgpu_bo_unpin(robj);
-                       amdgpu_bo_unreserve(robj);
+                       amdgpu_bo_unpin(aobj);
+                       amdgpu_bo_unreserve(aobj);
                }
                drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
        }
 
        amdgpu_crtc->cursor_bo = obj;
        return 0;
-fail:
-       drm_gem_object_unreference_unlocked(obj);
+}
 
-       return ret;
+static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
+{
+       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+       if (amdgpu_crtc->cursor_bo) {
+               dce_v8_0_lock_cursor(crtc, true);
+
+               dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
+                                           amdgpu_crtc->cursor_y);
+
+               dce_v8_0_show_cursor(crtc);
+
+               dce_v8_0_lock_cursor(crtc, false);
+       }
 }
 
 static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
@@ -2553,7 +2590,7 @@ static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
 }
 
 static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
-       .cursor_set = dce_v8_0_crtc_cursor_set,
+       .cursor_set2 = dce_v8_0_crtc_cursor_set2,
        .cursor_move = dce_v8_0_crtc_cursor_move,
        .gamma_set = dce_v8_0_crtc_gamma_set,
        .set_config = amdgpu_crtc_set_config,
@@ -2693,6 +2730,7 @@ static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
        dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
        amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
        amdgpu_atombios_crtc_scaler_setup(crtc);
+       dce_v8_0_cursor_reset(crtc);
        /* update the hw version fpr dpm */
        amdgpu_crtc->hw_mode = *adjusted_mode;
 
@@ -3274,37 +3312,20 @@ static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
                                                 unsigned type,
                                                 enum amdgpu_interrupt_state state)
 {
-       u32 reg, reg_block;
-       /* now deal with page flip IRQ */
-       switch (type) {
-               case AMDGPU_PAGEFLIP_IRQ_D1:
-                       reg_block = CRTC0_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D2:
-                       reg_block = CRTC1_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D3:
-                       reg_block = CRTC2_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D4:
-                       reg_block = CRTC3_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D5:
-                       reg_block = CRTC4_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D6:
-                       reg_block = CRTC5_REGISTER_OFFSET;
-                       break;
-               default:
-                       DRM_ERROR("invalid pageflip crtc %d\n", type);
-                       return -EINVAL;
+       u32 reg;
+
+       if (type >= adev->mode_info.num_crtc) {
+               DRM_ERROR("invalid pageflip crtc %d\n", type);
+               return -EINVAL;
        }
 
-       reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block);
+       reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
        if (state == AMDGPU_IRQ_STATE_DISABLE)
-               WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+               WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
+                      reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
        else
-               WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
+               WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
+                      reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
 
        return 0;
 }
@@ -3313,7 +3334,6 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
                                struct amdgpu_irq_src *source,
                                struct amdgpu_iv_entry *entry)
 {
-       int reg_block;
        unsigned long flags;
        unsigned crtc_id;
        struct amdgpu_crtc *amdgpu_crtc;
@@ -3322,33 +3342,15 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
        crtc_id = (entry->src_id - 8) >> 1;
        amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 
-       /* ack the interrupt */
-       switch(crtc_id){
-               case AMDGPU_PAGEFLIP_IRQ_D1:
-                       reg_block = CRTC0_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D2:
-                       reg_block = CRTC1_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D3:
-                       reg_block = CRTC2_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D4:
-                       reg_block = CRTC3_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D5:
-                       reg_block = CRTC4_REGISTER_OFFSET;
-                       break;
-               case AMDGPU_PAGEFLIP_IRQ_D6:
-                       reg_block = CRTC5_REGISTER_OFFSET;
-                       break;
-               default:
-                       DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
-                       return -EINVAL;
+       if (crtc_id >= adev->mode_info.num_crtc) {
+               DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
+               return -EINVAL;
        }
 
-       if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
-               WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
+       if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
+           GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
+               WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
+                      GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
 
        /* IRQ could occur when in initial stage */
        if (amdgpu_crtc == NULL)
index cb4f68f53f248ab58cdb00100dc0da5d6f81da9d..718250ae9856b144c86d6f0ad5db5246ca16c737 100644 (file)
@@ -903,6 +903,191 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
        return 0;
 }
 
+static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
+{
+       u32 gb_addr_config;
+       u32 mc_shared_chmap, mc_arb_ramcfg;
+       u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
+       u32 tmp;
+
+       switch (adev->asic_type) {
+       case CHIP_TOPAZ:
+               adev->gfx.config.max_shader_engines = 1;
+               adev->gfx.config.max_tile_pipes = 2;
+               adev->gfx.config.max_cu_per_sh = 6;
+               adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 2;
+               adev->gfx.config.max_texture_channel_caches = 2;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       case CHIP_FIJI:
+               adev->gfx.config.max_shader_engines = 4;
+               adev->gfx.config.max_tile_pipes = 16;
+               adev->gfx.config.max_cu_per_sh = 16;
+               adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 4;
+               adev->gfx.config.max_texture_channel_caches = 8;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       case CHIP_TONGA:
+               adev->gfx.config.max_shader_engines = 4;
+               adev->gfx.config.max_tile_pipes = 8;
+               adev->gfx.config.max_cu_per_sh = 8;
+               adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 2;
+               adev->gfx.config.max_texture_channel_caches = 8;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       case CHIP_CARRIZO:
+               adev->gfx.config.max_shader_engines = 1;
+               adev->gfx.config.max_tile_pipes = 2;
+               adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 2;
+
+               switch (adev->pdev->revision) {
+               case 0xc4:
+               case 0x84:
+               case 0xc8:
+               case 0xcc:
+                       /* B10 */
+                       adev->gfx.config.max_cu_per_sh = 8;
+                       break;
+               case 0xc5:
+               case 0x81:
+               case 0x85:
+               case 0xc9:
+               case 0xcd:
+                       /* B8 */
+                       adev->gfx.config.max_cu_per_sh = 6;
+                       break;
+               case 0xc6:
+               case 0xca:
+               case 0xce:
+                       /* B6 */
+                       adev->gfx.config.max_cu_per_sh = 6;
+                       break;
+               case 0xc7:
+               case 0x87:
+               case 0xcb:
+               default:
+                       /* B4 */
+                       adev->gfx.config.max_cu_per_sh = 4;
+                       break;
+               }
+
+               adev->gfx.config.max_texture_channel_caches = 2;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       default:
+               adev->gfx.config.max_shader_engines = 2;
+               adev->gfx.config.max_tile_pipes = 4;
+               adev->gfx.config.max_cu_per_sh = 2;
+               adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 2;
+               adev->gfx.config.max_texture_channel_caches = 4;
+               adev->gfx.config.max_gprs = 256;
+               adev->gfx.config.max_gs_threads = 32;
+               adev->gfx.config.max_hw_contexts = 8;
+
+               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+               gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
+               break;
+       }
+
+       mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
+       adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
+       mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
+
+       adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
+       adev->gfx.config.mem_max_burst_length_bytes = 256;
+       if (adev->flags & AMD_IS_APU) {
+               /* Get memory bank mapping mode. */
+               tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
+               dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
+               dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
+
+               tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
+               dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
+               dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
+
+               /* Validate settings in case only one DIMM installed. */
+               if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
+                       dimm00_addr_map = 0;
+               if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
+                       dimm01_addr_map = 0;
+               if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
+                       dimm10_addr_map = 0;
+               if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
+                       dimm11_addr_map = 0;
+
+               /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
+               /* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. */
+               if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
+                       adev->gfx.config.mem_row_size_in_kb = 2;
+               else
+                       adev->gfx.config.mem_row_size_in_kb = 1;
+       } else {
+               tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
+               adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+               if (adev->gfx.config.mem_row_size_in_kb > 4)
+                       adev->gfx.config.mem_row_size_in_kb = 4;
+       }
+
+       adev->gfx.config.shader_engine_tile_size = 32;
+       adev->gfx.config.num_gpus = 1;
+       adev->gfx.config.multi_gpu_tile_size = 64;
+
+       /* fix up row size */
+       switch (adev->gfx.config.mem_row_size_in_kb) {
+       case 1:
+       default:
+               gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
+               break;
+       case 2:
+               gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
+               break;
+       case 4:
+               gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
+               break;
+       }
+       adev->gfx.config.gb_addr_config = gb_addr_config;
+}
+
 static int gfx_v8_0_sw_init(void *handle)
 {
        int i, r;
@@ -1010,6 +1195,8 @@ static int gfx_v8_0_sw_init(void *handle)
 
        adev->gfx.ce_ram_size = 0x8000;
 
+       gfx_v8_0_gpu_early_init(adev);
+
        return 0;
 }
 
@@ -2043,203 +2230,23 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
 
 static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
 {
-       u32 gb_addr_config;
-       u32 mc_shared_chmap, mc_arb_ramcfg;
-       u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
        u32 tmp;
        int i;
 
-       switch (adev->asic_type) {
-       case CHIP_TOPAZ:
-               adev->gfx.config.max_shader_engines = 1;
-               adev->gfx.config.max_tile_pipes = 2;
-               adev->gfx.config.max_cu_per_sh = 6;
-               adev->gfx.config.max_sh_per_se = 1;
-               adev->gfx.config.max_backends_per_se = 2;
-               adev->gfx.config.max_texture_channel_caches = 2;
-               adev->gfx.config.max_gprs = 256;
-               adev->gfx.config.max_gs_threads = 32;
-               adev->gfx.config.max_hw_contexts = 8;
-
-               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
-               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
-               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
-               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
-               gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
-               break;
-       case CHIP_FIJI:
-               adev->gfx.config.max_shader_engines = 4;
-               adev->gfx.config.max_tile_pipes = 16;
-               adev->gfx.config.max_cu_per_sh = 16;
-               adev->gfx.config.max_sh_per_se = 1;
-               adev->gfx.config.max_backends_per_se = 4;
-               adev->gfx.config.max_texture_channel_caches = 8;
-               adev->gfx.config.max_gprs = 256;
-               adev->gfx.config.max_gs_threads = 32;
-               adev->gfx.config.max_hw_contexts = 8;
-
-               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
-               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
-               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
-               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
-               gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
-               break;
-       case CHIP_TONGA:
-               adev->gfx.config.max_shader_engines = 4;
-               adev->gfx.config.max_tile_pipes = 8;
-               adev->gfx.config.max_cu_per_sh = 8;
-               adev->gfx.config.max_sh_per_se = 1;
-               adev->gfx.config.max_backends_per_se = 2;
-               adev->gfx.config.max_texture_channel_caches = 8;
-               adev->gfx.config.max_gprs = 256;
-               adev->gfx.config.max_gs_threads = 32;
-               adev->gfx.config.max_hw_contexts = 8;
-
-               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
-               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
-               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
-               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
-               gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
-               break;
-       case CHIP_CARRIZO:
-               adev->gfx.config.max_shader_engines = 1;
-               adev->gfx.config.max_tile_pipes = 2;
-               adev->gfx.config.max_sh_per_se = 1;
-               adev->gfx.config.max_backends_per_se = 2;
-
-               switch (adev->pdev->revision) {
-               case 0xc4:
-               case 0x84:
-               case 0xc8:
-               case 0xcc:
-                       /* B10 */
-                       adev->gfx.config.max_cu_per_sh = 8;
-                       break;
-               case 0xc5:
-               case 0x81:
-               case 0x85:
-               case 0xc9:
-               case 0xcd:
-                       /* B8 */
-                       adev->gfx.config.max_cu_per_sh = 6;
-                       break;
-               case 0xc6:
-               case 0xca:
-               case 0xce:
-                       /* B6 */
-                       adev->gfx.config.max_cu_per_sh = 6;
-                       break;
-               case 0xc7:
-               case 0x87:
-               case 0xcb:
-               default:
-                       /* B4 */
-                       adev->gfx.config.max_cu_per_sh = 4;
-                       break;
-               }
-
-               adev->gfx.config.max_texture_channel_caches = 2;
-               adev->gfx.config.max_gprs = 256;
-               adev->gfx.config.max_gs_threads = 32;
-               adev->gfx.config.max_hw_contexts = 8;
-
-               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
-               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
-               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
-               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
-               gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
-               break;
-       default:
-               adev->gfx.config.max_shader_engines = 2;
-               adev->gfx.config.max_tile_pipes = 4;
-               adev->gfx.config.max_cu_per_sh = 2;
-               adev->gfx.config.max_sh_per_se = 1;
-               adev->gfx.config.max_backends_per_se = 2;
-               adev->gfx.config.max_texture_channel_caches = 4;
-               adev->gfx.config.max_gprs = 256;
-               adev->gfx.config.max_gs_threads = 32;
-               adev->gfx.config.max_hw_contexts = 8;
-
-               adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
-               adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
-               adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
-               adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
-               gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
-               break;
-       }
-
        tmp = RREG32(mmGRBM_CNTL);
        tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
        WREG32(mmGRBM_CNTL, tmp);
 
-       mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
-       adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
-       mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
-
-       adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
-       adev->gfx.config.mem_max_burst_length_bytes = 256;
-       if (adev->flags & AMD_IS_APU) {
-               /* Get memory bank mapping mode. */
-               tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
-               dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
-               dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
-
-               tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
-               dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
-               dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
-
-               /* Validate settings in case only one DIMM installed. */
-               if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
-                       dimm00_addr_map = 0;
-               if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
-                       dimm01_addr_map = 0;
-               if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
-                       dimm10_addr_map = 0;
-               if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
-                       dimm11_addr_map = 0;
-
-               /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
-               /* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. */
-               if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
-                       adev->gfx.config.mem_row_size_in_kb = 2;
-               else
-                       adev->gfx.config.mem_row_size_in_kb = 1;
-       } else {
-               tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
-               adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
-               if (adev->gfx.config.mem_row_size_in_kb > 4)
-                       adev->gfx.config.mem_row_size_in_kb = 4;
-       }
-
-       adev->gfx.config.shader_engine_tile_size = 32;
-       adev->gfx.config.num_gpus = 1;
-       adev->gfx.config.multi_gpu_tile_size = 64;
-
-       /* fix up row size */
-       switch (adev->gfx.config.mem_row_size_in_kb) {
-       case 1:
-       default:
-               gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
-               break;
-       case 2:
-               gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
-               break;
-       case 4:
-               gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
-               break;
-       }
-       adev->gfx.config.gb_addr_config = gb_addr_config;
-
-       WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
-       WREG32(mmHDP_ADDR_CONFIG, gb_addr_config);
-       WREG32(mmDMIF_ADDR_CALC, gb_addr_config);
+       WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+       WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+       WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
        WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET,
-              gb_addr_config & 0x70);
+              adev->gfx.config.gb_addr_config & 0x70);
        WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET,
-              gb_addr_config & 0x70);
-       WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config);
-       WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
-       WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+              adev->gfx.config.gb_addr_config & 0x70);
+       WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+       WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+       WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 
        gfx_v8_0_tiling_mode_table_init(adev);
 
@@ -2256,13 +2263,13 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
                if (i == 0) {
                        tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
                        tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
-                       tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE, 
+                       tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                            SH_MEM_ALIGNMENT_MODE_UNALIGNED);
                        WREG32(mmSH_MEM_CONFIG, tmp);
                } else {
                        tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
                        tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_NC);
-                       tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE, 
+                       tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                            SH_MEM_ALIGNMENT_MODE_UNALIGNED);
                        WREG32(mmSH_MEM_CONFIG, tmp);
                }
index fab5471d25d7e3dc3a3605d22c52fd669e919f7b..488348272c4d1a0e5a9d93d80277abdf5407039e 100644 (file)
@@ -435,6 +435,33 @@ static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
        return 0;
 }
 
+/**
+ * gmc_v7_0_set_fault_enable_default - update VM fault handling
+ *
+ * @adev: amdgpu_device pointer
+ * @value: true redirects VM faults to the default page
+ */
+static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
+                                             bool value)
+{
+       u32 tmp;
+
+       tmp = RREG32(mmVM_CONTEXT1_CNTL);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       WREG32(mmVM_CONTEXT1_CNTL, tmp);
+}
+
 /**
  * gmc_v7_0_gart_enable - gart enable
  *
@@ -523,15 +550,13 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
        tmp = RREG32(mmVM_CONTEXT1_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
-       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
                            amdgpu_vm_block_size - 9);
        WREG32(mmVM_CONTEXT1_CNTL, tmp);
+       if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
+               gmc_v7_0_set_fault_enable_default(adev, false);
+       else
+               gmc_v7_0_set_fault_enable_default(adev, true);
 
        if (adev->asic_type == CHIP_KAVERI) {
                tmp = RREG32(mmCHUB_CONTROL);
@@ -1268,6 +1293,9 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
        if (!addr && !status)
                return 0;
 
+       if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
+               gmc_v7_0_set_fault_enable_default(adev, false);
+
        dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
                entry->src_id, entry->src_data);
        dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
index 7bc9e9fcf3d26cbbaa6d7aa76fbef0349964ec6f..42b5ff827055146017b707c59fca8ac3d9fc9b7a 100644 (file)
@@ -549,6 +549,35 @@ static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
        return 0;
 }
 
+/**
+ * gmc_v8_0_set_fault_enable_default - update VM fault handling
+ *
+ * @adev: amdgpu_device pointer
+ * @value: true redirects VM faults to the default page
+ */
+static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
+                                             bool value)
+{
+       u32 tmp;
+
+       tmp = RREG32(mmVM_CONTEXT1_CNTL);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+                           EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+       WREG32(mmVM_CONTEXT1_CNTL, tmp);
+}
+
 /**
  * gmc_v8_0_gart_enable - gart enable
  *
@@ -663,6 +692,10 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
                            amdgpu_vm_block_size - 9);
        WREG32(mmVM_CONTEXT1_CNTL, tmp);
+       if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
+               gmc_v8_0_set_fault_enable_default(adev, false);
+       else
+               gmc_v8_0_set_fault_enable_default(adev, true);
 
        gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -1268,6 +1301,9 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
        if (!addr && !status)
                return 0;
 
+       if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
+               gmc_v8_0_set_fault_enable_default(adev, false);
+
        dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
                entry->src_id, entry->src_data);
        dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
index 94ec04a9c4d5c975eeb329dc770d3dd055b74ae5..9745ed3a9aef866443e269022142c52ec3fc3d65 100644 (file)
@@ -2995,6 +2995,12 @@ static int kv_dpm_late_init(void *handle)
 {
        /* powerdown unused blocks for now */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int ret;
+
+       /* init the sysfs and debugfs files late */
+       ret = amdgpu_pm_sysfs_init(adev);
+       if (ret)
+               return ret;
 
        kv_dpm_powergate_acp(adev, true);
        kv_dpm_powergate_samu(adev, true);
@@ -3038,9 +3044,6 @@ static int kv_dpm_sw_init(void *handle)
        adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
        if (amdgpu_dpm == 1)
                amdgpu_pm_print_power_states(adev);
-       ret = amdgpu_pm_sysfs_init(adev);
-       if (ret)
-               goto dpm_failed;
        mutex_unlock(&adev->pm.mutex);
        DRM_INFO("amdgpu: dpm initialized\n");
 
index 14e87234171aeacb9cbbbedafa358e22825b5526..f8b868c7c4960f80f6f33e472becb1aeaff9cafb 100644 (file)
@@ -118,7 +118,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
 {
        const char *chip_name;
        char fw_name[30];
-       int err, i;
+       int err = 0, i;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
        const struct sdma_firmware_header_v1_0 *hdr;
@@ -132,27 +132,27 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
        default: BUG();
        }
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                if (i == 0)
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
                else
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
-               err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
+               err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
                if (err)
                        goto out;
-               err = amdgpu_ucode_validate(adev->sdma[i].fw);
+               err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
                if (err)
                        goto out;
-               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
-               adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-               adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
-               if (adev->sdma[i].feature_version >= 20)
-                       adev->sdma[i].burst_nop = true;
+               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
+               adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+               adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+               if (adev->sdma.instance[i].feature_version >= 20)
+                       adev->sdma.instance[i].burst_nop = true;
 
                if (adev->firmware.smu_load) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
                        info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
-                       info->fw = adev->sdma[i].fw;
+                       info->fw = adev->sdma.instance[i].fw;
                        header = (const struct common_firmware_header *)info->fw->data;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
@@ -164,9 +164,9 @@ out:
                printk(KERN_ERR
                       "sdma_v2_4: Failed to load firmware \"%s\"\n",
                       fw_name);
-               for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-                       release_firmware(adev->sdma[i].fw);
-                       adev->sdma[i].fw = NULL;
+               for (i = 0; i < adev->sdma.num_instances; i++) {
+                       release_firmware(adev->sdma.instance[i].fw);
+                       adev->sdma.instance[i].fw = NULL;
                }
        }
        return err;
@@ -199,7 +199,7 @@ static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
 static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+       int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
        u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
 
        return wptr;
@@ -215,14 +215,14 @@ static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
 static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+       int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 
        WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
 }
 
 static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-       struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
        int i;
 
        for (i = 0; i < count; i++)
@@ -284,7 +284,7 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        u32 ref_and_mask = 0;
 
-       if (ring == &ring->adev->sdma[0].ring)
+       if (ring == &ring->adev->sdma.instance[0].ring)
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
        else
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -368,8 +368,8 @@ static bool sdma_v2_4_ring_emit_semaphore(struct amdgpu_ring *ring,
  */
 static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
-       struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
+       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
+       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl, ib_cntl;
        int i;
 
@@ -377,7 +377,7 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
            (adev->mman.buffer_funcs_ring == sdma1))
                amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -419,7 +419,7 @@ static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
                sdma_v2_4_rlc_stop(adev);
        }
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
                if (enable)
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
@@ -445,8 +445,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
        u32 wb_offset;
        int i, j, r;
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-               ring = &adev->sdma[i].ring;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
                wb_offset = (ring->rptr_offs * 4);
 
                mutex_lock(&adev->srbm_mutex);
@@ -545,29 +545,23 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
        const __le32 *fw_data;
        u32 fw_size;
        int i, j;
-       bool smc_loads_fw = false; /* XXX fix me */
-
-       if (!adev->sdma[0].fw || !adev->sdma[1].fw)
-               return -EINVAL;
 
        /* halt the MEs */
        sdma_v2_4_enable(adev, false);
 
-       if (smc_loads_fw) {
-               /* XXX query SMC for fw load complete */
-       } else {
-               for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-                       hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
-                       amdgpu_ucode_print_sdma_hdr(&hdr->header);
-                       fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
-                       fw_data = (const __le32 *)
-                               (adev->sdma[i].fw->data +
-                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-                       WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
-                       for (j = 0; j < fw_size; j++)
-                               WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
-                       WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
-               }
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               if (!adev->sdma.instance[i].fw)
+                       return -EINVAL;
+               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
+               amdgpu_ucode_print_sdma_hdr(&hdr->header);
+               fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+               fw_data = (const __le32 *)
+                       (adev->sdma.instance[i].fw->data +
+                        le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+               WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
+               for (j = 0; j < fw_size; j++)
+                       WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
+               WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
        }
 
        return 0;
@@ -894,7 +888,7 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib)
 {
-       struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
        u32 pad_count;
        int i;
 
@@ -952,6 +946,8 @@ static int sdma_v2_4_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       adev->sdma.num_instances = SDMA_MAX_INSTANCE;
+
        sdma_v2_4_set_ring_funcs(adev);
        sdma_v2_4_set_buffer_funcs(adev);
        sdma_v2_4_set_vm_pte_funcs(adev);
@@ -963,21 +959,21 @@ static int sdma_v2_4_early_init(void *handle)
 static int sdma_v2_4_sw_init(void *handle)
 {
        struct amdgpu_ring *ring;
-       int r;
+       int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* SDMA trap event */
-       r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
+       r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
+       r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
+       r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
@@ -987,31 +983,20 @@ static int sdma_v2_4_sw_init(void *handle)
                return r;
        }
 
-       ring = &adev->sdma[0].ring;
-       ring->ring_obj = NULL;
-       ring->use_doorbell = false;
-
-       ring = &adev->sdma[1].ring;
-       ring->ring_obj = NULL;
-       ring->use_doorbell = false;
-
-       ring = &adev->sdma[0].ring;
-       sprintf(ring->name, "sdma0");
-       r = amdgpu_ring_init(adev, ring, 256 * 1024,
-                            SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
-                            &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
-                            AMDGPU_RING_TYPE_SDMA);
-       if (r)
-               return r;
-
-       ring = &adev->sdma[1].ring;
-       sprintf(ring->name, "sdma1");
-       r = amdgpu_ring_init(adev, ring, 256 * 1024,
-                            SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
-                            &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
-                            AMDGPU_RING_TYPE_SDMA);
-       if (r)
-               return r;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
+               ring->ring_obj = NULL;
+               ring->use_doorbell = false;
+               sprintf(ring->name, "sdma%d", i);
+               r = amdgpu_ring_init(adev, ring, 256 * 1024,
+                                    SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
+                                    &adev->sdma.trap_irq,
+                                    (i == 0) ?
+                                    AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
+                                    AMDGPU_RING_TYPE_SDMA);
+               if (r)
+                       return r;
+       }
 
        return r;
 }
@@ -1019,9 +1004,10 @@ static int sdma_v2_4_sw_init(void *handle)
 static int sdma_v2_4_sw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int i;
 
-       amdgpu_ring_fini(&adev->sdma[0].ring);
-       amdgpu_ring_fini(&adev->sdma[1].ring);
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
        return 0;
 }
@@ -1100,7 +1086,7 @@ static void sdma_v2_4_print_status(void *handle)
        dev_info(adev->dev, "VI SDMA registers\n");
        dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
                 RREG32(mmSRBM_STATUS2));
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
                         i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
@@ -1243,7 +1229,7 @@ static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
        case 0:
                switch (queue_id) {
                case 0:
-                       amdgpu_fence_process(&adev->sdma[0].ring);
+                       amdgpu_fence_process(&adev->sdma.instance[0].ring);
                        break;
                case 1:
                        /* XXX compute */
@@ -1256,7 +1242,7 @@ static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
        case 1:
                switch (queue_id) {
                case 0:
-                       amdgpu_fence_process(&adev->sdma[1].ring);
+                       amdgpu_fence_process(&adev->sdma.instance[1].ring);
                        break;
                case 1:
                        /* XXX compute */
@@ -1345,8 +1331,10 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
 
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->sdma[0].ring.funcs = &sdma_v2_4_ring_funcs;
-       adev->sdma[1].ring.funcs = &sdma_v2_4_ring_funcs;
+       int i;
+
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
@@ -1360,9 +1348,9 @@ static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
 
 static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
 {
-       adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
-       adev->sdma_trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
-       adev->sdma_illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
+       adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+       adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
+       adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
 }
 
 /**
@@ -1428,7 +1416,7 @@ static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
 {
        if (adev->mman.buffer_funcs == NULL) {
                adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
-               adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
+               adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
        }
 }
 
@@ -1443,7 +1431,7 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
        if (adev->vm_manager.vm_pte_funcs == NULL) {
                adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
-               adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+               adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
                adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
        }
 }
index 9bfe92df15f712b86f45e674a8c995f10812ed0f..670555a45da91b052446bb99eba4599a082efa46 100644 (file)
@@ -184,7 +184,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
 {
        const char *chip_name;
        char fw_name[30];
-       int err, i;
+       int err = 0, i;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
        const struct sdma_firmware_header_v1_0 *hdr;
@@ -204,27 +204,27 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
        default: BUG();
        }
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                if (i == 0)
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
                else
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
-               err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
+               err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
                if (err)
                        goto out;
-               err = amdgpu_ucode_validate(adev->sdma[i].fw);
+               err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
                if (err)
                        goto out;
-               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
-               adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-               adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
-               if (adev->sdma[i].feature_version >= 20)
-                       adev->sdma[i].burst_nop = true;
+               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
+               adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+               adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+               if (adev->sdma.instance[i].feature_version >= 20)
+                       adev->sdma.instance[i].burst_nop = true;
 
                if (adev->firmware.smu_load) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
                        info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
-                       info->fw = adev->sdma[i].fw;
+                       info->fw = adev->sdma.instance[i].fw;
                        header = (const struct common_firmware_header *)info->fw->data;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
@@ -235,9 +235,9 @@ out:
                printk(KERN_ERR
                       "sdma_v3_0: Failed to load firmware \"%s\"\n",
                       fw_name);
-               for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-                       release_firmware(adev->sdma[i].fw);
-                       adev->sdma[i].fw = NULL;
+               for (i = 0; i < adev->sdma.num_instances; i++) {
+                       release_firmware(adev->sdma.instance[i].fw);
+                       adev->sdma.instance[i].fw = NULL;
                }
        }
        return err;
@@ -276,7 +276,7 @@ static uint32_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
                /* XXX check if swapping is necessary on BE */
                wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
        } else {
-               int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+               int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 
                wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
        }
@@ -300,7 +300,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
                adev->wb.wb[ring->wptr_offs] = ring->wptr << 2;
                WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
        } else {
-               int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+               int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
        }
@@ -308,7 +308,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 
 static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-       struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
        int i;
 
        for (i = 0; i < count; i++)
@@ -369,7 +369,7 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        u32 ref_and_mask = 0;
 
-       if (ring == &ring->adev->sdma[0].ring)
+       if (ring == &ring->adev->sdma.instance[0].ring)
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
        else
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -454,8 +454,8 @@ static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring,
  */
 static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
-       struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
+       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
+       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl, ib_cntl;
        int i;
 
@@ -463,7 +463,7 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
            (adev->mman.buffer_funcs_ring == sdma1))
                amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
                WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -500,7 +500,7 @@ static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
        u32 f32_cntl;
        int i;
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
                if (enable)
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
@@ -530,7 +530,7 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
                sdma_v3_0_rlc_stop(adev);
        }
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
                if (enable)
                        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
@@ -557,8 +557,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
        u32 doorbell;
        int i, j, r;
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-               ring = &adev->sdma[i].ring;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
                wb_offset = (ring->rptr_offs * 4);
 
                mutex_lock(&adev->srbm_mutex);
@@ -669,23 +669,22 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
        u32 fw_size;
        int i, j;
 
-       if (!adev->sdma[0].fw || !adev->sdma[1].fw)
-               return -EINVAL;
-
        /* halt the MEs */
        sdma_v3_0_enable(adev, false);
 
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               if (!adev->sdma.instance[i].fw)
+                       return -EINVAL;
+               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
                amdgpu_ucode_print_sdma_hdr(&hdr->header);
                fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
                fw_data = (const __le32 *)
-                       (adev->sdma[i].fw->data +
+                       (adev->sdma.instance[i].fw->data +
                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
                for (j = 0; j < fw_size; j++)
                        WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
-               WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
+               WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
        }
 
        return 0;
@@ -701,21 +700,21 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
  */
 static int sdma_v3_0_start(struct amdgpu_device *adev)
 {
-       int r;
+       int r, i;
 
        if (!adev->firmware.smu_load) {
                r = sdma_v3_0_load_microcode(adev);
                if (r)
                        return r;
        } else {
-               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-                                               AMDGPU_UCODE_ID_SDMA0);
-               if (r)
-                       return -EINVAL;
-               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-                                               AMDGPU_UCODE_ID_SDMA1);
-               if (r)
-                       return -EINVAL;
+               for (i = 0; i < adev->sdma.num_instances; i++) {
+                       r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                                                                        (i == 0) ?
+                                                                        AMDGPU_UCODE_ID_SDMA0 :
+                                                                        AMDGPU_UCODE_ID_SDMA1);
+                       if (r)
+                               return -EINVAL;
+               }
        }
 
        /* unhalt the MEs */
@@ -1013,7 +1012,7 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
 {
-       struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+       struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
        u32 pad_count;
        int i;
 
@@ -1071,6 +1070,12 @@ static int sdma_v3_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       switch (adev->asic_type) {
+       default:
+               adev->sdma.num_instances = SDMA_MAX_INSTANCE;
+               break;
+       }
+
        sdma_v3_0_set_ring_funcs(adev);
        sdma_v3_0_set_buffer_funcs(adev);
        sdma_v3_0_set_vm_pte_funcs(adev);
@@ -1082,21 +1087,21 @@ static int sdma_v3_0_early_init(void *handle)
 static int sdma_v3_0_sw_init(void *handle)
 {
        struct amdgpu_ring *ring;
-       int r;
+       int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* SDMA trap event */
-       r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
+       r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
+       r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
+       r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
 
@@ -1106,33 +1111,23 @@ static int sdma_v3_0_sw_init(void *handle)
                return r;
        }
 
-       ring = &adev->sdma[0].ring;
-       ring->ring_obj = NULL;
-       ring->use_doorbell = true;
-       ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE0;
-
-       ring = &adev->sdma[1].ring;
-       ring->ring_obj = NULL;
-       ring->use_doorbell = true;
-       ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE1;
-
-       ring = &adev->sdma[0].ring;
-       sprintf(ring->name, "sdma0");
-       r = amdgpu_ring_init(adev, ring, 256 * 1024,
-                            SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
-                            &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
-                            AMDGPU_RING_TYPE_SDMA);
-       if (r)
-               return r;
-
-       ring = &adev->sdma[1].ring;
-       sprintf(ring->name, "sdma1");
-       r = amdgpu_ring_init(adev, ring, 256 * 1024,
-                            SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
-                            &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
-                            AMDGPU_RING_TYPE_SDMA);
-       if (r)
-               return r;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
+               ring->ring_obj = NULL;
+               ring->use_doorbell = true;
+               ring->doorbell_index = (i == 0) ?
+                       AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
+
+               sprintf(ring->name, "sdma%d", i);
+               r = amdgpu_ring_init(adev, ring, 256 * 1024,
+                                    SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
+                                    &adev->sdma.trap_irq,
+                                    (i == 0) ?
+                                    AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
+                                    AMDGPU_RING_TYPE_SDMA);
+               if (r)
+                       return r;
+       }
 
        return r;
 }
@@ -1140,9 +1135,10 @@ static int sdma_v3_0_sw_init(void *handle)
 static int sdma_v3_0_sw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int i;
 
-       amdgpu_ring_fini(&adev->sdma[0].ring);
-       amdgpu_ring_fini(&adev->sdma[1].ring);
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
        return 0;
 }
@@ -1222,7 +1218,7 @@ static void sdma_v3_0_print_status(void *handle)
        dev_info(adev->dev, "VI SDMA registers\n");
        dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
                 RREG32(mmSRBM_STATUS2));
-       for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
                         i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
                dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
@@ -1367,7 +1363,7 @@ static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
        case 0:
                switch (queue_id) {
                case 0:
-                       amdgpu_fence_process(&adev->sdma[0].ring);
+                       amdgpu_fence_process(&adev->sdma.instance[0].ring);
                        break;
                case 1:
                        /* XXX compute */
@@ -1380,7 +1376,7 @@ static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
        case 1:
                switch (queue_id) {
                case 0:
-                       amdgpu_fence_process(&adev->sdma[1].ring);
+                       amdgpu_fence_process(&adev->sdma.instance[1].ring);
                        break;
                case 1:
                        /* XXX compute */
@@ -1468,8 +1464,10 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
 
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-       adev->sdma[0].ring.funcs = &sdma_v3_0_ring_funcs;
-       adev->sdma[1].ring.funcs = &sdma_v3_0_ring_funcs;
+       int i;
+
+       for (i = 0; i < adev->sdma.num_instances; i++)
+               adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
@@ -1483,9 +1481,9 @@ static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
 
 static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-       adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
-       adev->sdma_trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
-       adev->sdma_illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
+       adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+       adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
+       adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
 }
 
 /**
@@ -1551,7 +1549,7 @@ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
 {
        if (adev->mman.buffer_funcs == NULL) {
                adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
-               adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
+               adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
        }
 }
 
@@ -1566,7 +1564,7 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
        if (adev->vm_manager.vm_pte_funcs == NULL) {
                adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
-               adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+               adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
                adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
        }
 }
index b55ceb14fdcd91e92f7a5924a232490a6419a80a..0bac8702e9348c2ee9c86ed52d6aaf490ef5032f 100644 (file)
@@ -1005,6 +1005,9 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
        u32 mask;
        int ret;
 
+       if (pci_is_root_bus(adev->pdev->bus))
+               return;
+
        if (amdgpu_pcie_gen2 == 0)
                return;
 
index 3697eeeecf82a75ef1a5b0ee44e6b4d572841053..7fa1d7a438e9c71ba76477f76d3f2d5379ca5955 100644 (file)
@@ -327,19 +327,49 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
        struct amd_sched_fence *s_fence =
                container_of(cb, struct amd_sched_fence, cb);
        struct amd_gpu_scheduler *sched = s_fence->sched;
+       unsigned long flags;
 
        atomic_dec(&sched->hw_rq_count);
        amd_sched_fence_signal(s_fence);
+       if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
+               cancel_delayed_work_sync(&s_fence->dwork);
+               spin_lock_irqsave(&sched->fence_list_lock, flags);
+               list_del_init(&s_fence->list);
+               spin_unlock_irqrestore(&sched->fence_list_lock, flags);
+       }
        fence_put(&s_fence->base);
        wake_up_interruptible(&sched->wake_up_worker);
 }
 
+static void amd_sched_fence_work_func(struct work_struct *work)
+{
+       struct amd_sched_fence *s_fence =
+               container_of(work, struct amd_sched_fence, dwork.work);
+       struct amd_gpu_scheduler *sched = s_fence->sched;
+       struct amd_sched_fence *entity, *tmp;
+       unsigned long flags;
+
+       DRM_ERROR("[%s] scheduler is timeout!\n", sched->name);
+
+       /* Clean all pending fences */
+       spin_lock_irqsave(&sched->fence_list_lock, flags);
+       list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
+               DRM_ERROR("  fence no %d\n", entity->base.seqno);
+               cancel_delayed_work(&entity->dwork);
+               list_del_init(&entity->list);
+               fence_put(&entity->base);
+       }
+       spin_unlock_irqrestore(&sched->fence_list_lock, flags);
+}
+
 static int amd_sched_main(void *param)
 {
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
        int r, count;
 
+       spin_lock_init(&sched->fence_list_lock);
+       INIT_LIST_HEAD(&sched->fence_list);
        sched_setscheduler(current, SCHED_FIFO, &sparam);
 
        while (!kthread_should_stop()) {
@@ -347,6 +377,7 @@ static int amd_sched_main(void *param)
                struct amd_sched_fence *s_fence;
                struct amd_sched_job *sched_job;
                struct fence *fence;
+               unsigned long flags;
 
                wait_event_interruptible(sched->wake_up_worker,
                        kthread_should_stop() ||
@@ -357,6 +388,15 @@ static int amd_sched_main(void *param)
 
                entity = sched_job->s_entity;
                s_fence = sched_job->s_fence;
+
+               if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
+                       INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
+                       schedule_delayed_work(&s_fence->dwork, sched->timeout);
+                       spin_lock_irqsave(&sched->fence_list_lock, flags);
+                       list_add_tail(&s_fence->list, &sched->fence_list);
+                       spin_unlock_irqrestore(&sched->fence_list_lock, flags);
+               }
+
                atomic_inc(&sched->hw_rq_count);
                fence = sched->ops->run_job(sched_job);
                if (fence) {
@@ -392,11 +432,12 @@ static int amd_sched_main(void *param)
 */
 int amd_sched_init(struct amd_gpu_scheduler *sched,
                   struct amd_sched_backend_ops *ops,
-                  unsigned hw_submission, const char *name)
+                  unsigned hw_submission, long timeout, const char *name)
 {
        sched->ops = ops;
        sched->hw_submission_limit = hw_submission;
        sched->name = name;
+       sched->timeout = timeout;
        amd_sched_rq_init(&sched->sched_rq);
        amd_sched_rq_init(&sched->kernel_rq);
 
index 80b64dc2221417938347e8ac463a6d3d676b9f0d..929e9aced04195e24ed5ddcae99ab385ce830898 100644 (file)
@@ -68,6 +68,8 @@ struct amd_sched_fence {
        struct amd_gpu_scheduler        *sched;
        spinlock_t                      lock;
        void                            *owner;
+       struct delayed_work             dwork;
+       struct list_head                list;
 };
 
 struct amd_sched_job {
@@ -103,18 +105,21 @@ struct amd_sched_backend_ops {
 struct amd_gpu_scheduler {
        struct amd_sched_backend_ops    *ops;
        uint32_t                        hw_submission_limit;
+       long                            timeout;
        const char                      *name;
        struct amd_sched_rq             sched_rq;
        struct amd_sched_rq             kernel_rq;
        wait_queue_head_t               wake_up_worker;
        wait_queue_head_t               job_scheduled;
        atomic_t                        hw_rq_count;
+       struct list_head                fence_list;
+       spinlock_t                      fence_list_lock;
        struct task_struct              *thread;
 };
 
 int amd_sched_init(struct amd_gpu_scheduler *sched,
                   struct amd_sched_backend_ops *ops,
-                  uint32_t hw_submission, const char *name);
+                  uint32_t hw_submission, long timeout, const char *name);
 void amd_sched_fini(struct amd_gpu_scheduler *sched);
 
 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
index 50ae88ad4d76fb85b863adfd6129ea7a75df2178..eb773e9af313a6715f21dcc1ede2cf98eee07afe 100644 (file)
@@ -14,12 +14,3 @@ config DRM_ARMADA
          This driver provides no built-in acceleration; acceleration is
          performed by other IP found on the SoC.  This driver provides
          kernel mode setting and buffer management to userspace.
-
-config DRM_ARMADA_TDA1998X
-       bool "Support TDA1998X HDMI output"
-       depends on DRM_ARMADA != n
-       depends on I2C && DRM_I2C_NXP_TDA998X = y
-       default y
-       help
-         Support the TDA1998x HDMI output device found on the Solid-Run
-         CuBox.
index d6f43e06150aa62b57e7a71cc6fb424e7265b312..ffd67361577280117dfffedc7b94bda7470b5ee2 100644 (file)
@@ -1,6 +1,5 @@
 armada-y       := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
-                  armada_gem.o armada_output.o armada_overlay.o \
-                  armada_slave.o
+                  armada_gem.o armada_overlay.o
 armada-y       += armada_510.o
 armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
 
index 01ffe9bffe38a9e93d49a811a4803f49866a0861..cebcab5606268f76aa1f9161315ef17ba82981d7 100644 (file)
@@ -20,6 +20,7 @@
 #include "armada_hw.h"
 
 struct armada_frame_work {
+       struct armada_plane_work work;
        struct drm_pending_vblank_event *event;
        struct armada_regs regs[4];
        struct drm_framebuffer *old_fb;
@@ -33,6 +34,23 @@ enum csc_mode {
        CSC_RGB_STUDIO = 2,
 };
 
+static const uint32_t armada_primary_formats[] = {
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_VYUY,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_RGB888,
+       DRM_FORMAT_BGR888,
+       DRM_FORMAT_ARGB1555,
+       DRM_FORMAT_ABGR1555,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_BGR565,
+};
+
 /*
  * A note about interlacing.  Let's consider HDMI 1920x1080i.
  * The timing parameters we have from X are:
@@ -173,49 +191,82 @@ static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
        return i;
 }
 
-static int armada_drm_crtc_queue_frame_work(struct armada_crtc *dcrtc,
-       struct armada_frame_work *work)
+static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
+       struct armada_plane *plane)
+{
+       struct armada_plane_work *work = xchg(&plane->work, NULL);
+
+       /* Handle any pending frame work. */
+       if (work) {
+               work->fn(dcrtc, plane, work);
+               drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+       }
+
+       wake_up(&plane->frame_wait);
+}
+
+int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
+       struct armada_plane *plane, struct armada_plane_work *work)
 {
-       struct drm_device *dev = dcrtc->crtc.dev;
-       unsigned long flags;
        int ret;
 
-       ret = drm_vblank_get(dev, dcrtc->num);
+       ret = drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
        if (ret) {
                DRM_ERROR("failed to acquire vblank counter\n");
                return ret;
        }
 
-       spin_lock_irqsave(&dev->event_lock, flags);
-       if (!dcrtc->frame_work)
-               dcrtc->frame_work = work;
-       else
-               ret = -EBUSY;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-
+       ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0;
        if (ret)
-               drm_vblank_put(dev, dcrtc->num);
+               drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
 
        return ret;
 }
 
-static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc)
+int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout)
 {
-       struct drm_device *dev = dcrtc->crtc.dev;
-       struct armada_frame_work *work = dcrtc->frame_work;
+       return wait_event_timeout(plane->frame_wait, !plane->work, timeout);
+}
 
-       dcrtc->frame_work = NULL;
+struct armada_plane_work *armada_drm_plane_work_cancel(
+       struct armada_crtc *dcrtc, struct armada_plane *plane)
+{
+       struct armada_plane_work *work = xchg(&plane->work, NULL);
 
-       armada_drm_crtc_update_regs(dcrtc, work->regs);
+       if (work)
+               drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
 
-       if (work->event)
-               drm_send_vblank_event(dev, dcrtc->num, work->event);
+       return work;
+}
 
-       drm_vblank_put(dev, dcrtc->num);
+static int armada_drm_crtc_queue_frame_work(struct armada_crtc *dcrtc,
+       struct armada_frame_work *work)
+{
+       struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);
+
+       return armada_drm_plane_work_queue(dcrtc, plane, &work->work);
+}
+
+static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
+       struct armada_plane *plane, struct armada_plane_work *work)
+{
+       struct armada_frame_work *fwork = container_of(work, struct armada_frame_work, work);
+       struct drm_device *dev = dcrtc->crtc.dev;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dcrtc->irq_lock, flags);
+       armada_drm_crtc_update_regs(dcrtc, fwork->regs);
+       spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+
+       if (fwork->event) {
+               spin_lock_irqsave(&dev->event_lock, flags);
+               drm_send_vblank_event(dev, dcrtc->num, fwork->event);
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+       }
 
        /* Finally, queue the process-half of the cleanup. */
-       __armada_drm_queue_unref_work(dcrtc->crtc.dev, work->old_fb);
-       kfree(work);
+       __armada_drm_queue_unref_work(dcrtc->crtc.dev, fwork->old_fb);
+       kfree(fwork);
 }
 
 static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
@@ -235,6 +286,7 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
        work = kmalloc(sizeof(*work), GFP_KERNEL);
        if (work) {
                int i = 0;
+               work->work.fn = armada_drm_crtc_complete_frame_work;
                work->event = NULL;
                work->old_fb = fb;
                armada_reg_queue_end(work->regs, i);
@@ -255,19 +307,14 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
 
 static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
 {
-       struct drm_device *dev = dcrtc->crtc.dev;
+       struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);
 
        /*
         * Tell the DRM core that vblank IRQs aren't going to happen for
         * a while.  This cleans up any pending vblank events for us.
         */
        drm_crtc_vblank_off(&dcrtc->crtc);
-
-       /* Handle any pending flip event. */
-       spin_lock_irq(&dev->event_lock);
-       if (dcrtc->frame_work)
-               armada_drm_crtc_complete_frame_work(dcrtc);
-       spin_unlock_irq(&dev->event_lock);
+       armada_drm_plane_work_run(dcrtc, plane);
 }
 
 void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
@@ -287,7 +334,11 @@ static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
 
        if (dcrtc->dpms != dpms) {
                dcrtc->dpms = dpms;
+               if (!IS_ERR(dcrtc->clk) && !dpms_blanked(dpms))
+                       WARN_ON(clk_prepare_enable(dcrtc->clk));
                armada_drm_crtc_update(dcrtc);
+               if (!IS_ERR(dcrtc->clk) && dpms_blanked(dpms))
+                       clk_disable_unprepare(dcrtc->clk);
                if (dpms_blanked(dpms))
                        armada_drm_vblank_off(dcrtc);
                else
@@ -310,17 +361,11 @@ static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
        /*
         * If we have an overlay plane associated with this CRTC, disable
         * it before the modeset to avoid its coordinates being outside
-        * the new mode parameters.  DRM doesn't provide help with this.
+        * the new mode parameters.
         */
        plane = dcrtc->plane;
-       if (plane) {
-               struct drm_framebuffer *fb = plane->fb;
-
-               plane->funcs->disable_plane(plane);
-               plane->fb = NULL;
-               plane->crtc = NULL;
-               drm_framebuffer_unreference(fb);
-       }
+       if (plane)
+               drm_plane_force_disable(plane);
 }
 
 /* The mode_config.mutex will be held for this call */
@@ -356,8 +401,8 @@ static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
 
 static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
 {
-       struct armada_vbl_event *e, *n;
        void __iomem *base = dcrtc->base;
+       struct drm_plane *ovl_plane;
 
        if (stat & DMA_FF_UNDERFLOW)
                DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);
@@ -368,11 +413,10 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
                drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num);
 
        spin_lock(&dcrtc->irq_lock);
-
-       list_for_each_entry_safe(e, n, &dcrtc->vbl_list, node) {
-               list_del_init(&e->node);
-               drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
-               e->fn(dcrtc, e->data);
+       ovl_plane = dcrtc->plane;
+       if (ovl_plane) {
+               struct armada_plane *plane = drm_to_armada_plane(ovl_plane);
+               armada_drm_plane_work_run(dcrtc, plane);
        }
 
        if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
@@ -404,14 +448,8 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
        spin_unlock(&dcrtc->irq_lock);
 
        if (stat & GRA_FRAME_IRQ) {
-               struct drm_device *dev = dcrtc->crtc.dev;
-
-               spin_lock(&dev->event_lock);
-               if (dcrtc->frame_work)
-                       armada_drm_crtc_complete_frame_work(dcrtc);
-               spin_unlock(&dev->event_lock);
-
-               wake_up(&dcrtc->frame_wait);
+               struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);
+               armada_drm_plane_work_run(dcrtc, plane);
        }
 }
 
@@ -527,7 +565,8 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
                adj->crtc_vtotal, tm, bm);
 
        /* Wait for pending flips to complete */
-       wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
+       armada_drm_plane_work_wait(drm_to_armada_plane(dcrtc->crtc.primary),
+                                  MAX_SCHEDULE_TIMEOUT);
 
        drm_crtc_vblank_off(crtc);
 
@@ -537,6 +576,13 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
                writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL);
        }
 
+       /*
+        * If we are blanked, we would have disabled the clock.  Re-enable
+        * it so that compute_clock() does the right thing.
+        */
+       if (!IS_ERR(dcrtc->clk) && dpms_blanked(dcrtc->dpms))
+               WARN_ON(clk_prepare_enable(dcrtc->clk));
+
        /* Now compute the divider for real */
        dcrtc->variant->compute_clock(dcrtc, adj, &sclk);
 
@@ -637,7 +683,8 @@ static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
        armada_reg_queue_end(regs, i);
 
        /* Wait for pending flips to complete */
-       wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
+       armada_drm_plane_work_wait(drm_to_armada_plane(dcrtc->crtc.primary),
+                                  MAX_SCHEDULE_TIMEOUT);
 
        /* Take a reference to the new fb as we're using it */
        drm_framebuffer_reference(crtc->primary->fb);
@@ -651,18 +698,47 @@ static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
        return 0;
 }
 
+void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
+       struct drm_plane *plane)
+{
+       u32 sram_para1, dma_ctrl0_mask;
+
+       /*
+        * Drop our reference on any framebuffer attached to this plane.
+        * We don't need to NULL this out as drm_plane_force_disable(),
+        * and __setplane_internal() will do so for an overlay plane, and
+        * __drm_helper_disable_unused_functions() will do so for the
+        * primary plane.
+        */
+       if (plane->fb)
+               drm_framebuffer_unreference(plane->fb);
+
+       /* Power down the Y/U/V FIFOs */
+       sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
+
+       /* Power down most RAMs and FIFOs if this is the primary plane */
+       if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+               sram_para1 |= CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+                             CFG_PDWN32x32 | CFG_PDWN64x66;
+               dma_ctrl0_mask = CFG_GRA_ENA;
+       } else {
+               dma_ctrl0_mask = CFG_DMA_ENA;
+       }
+
+       spin_lock_irq(&dcrtc->irq_lock);
+       armada_updatel(0, dma_ctrl0_mask, dcrtc->base + LCD_SPU_DMA_CTRL0);
+       spin_unlock_irq(&dcrtc->irq_lock);
+
+       armada_updatel(sram_para1, 0, dcrtc->base + LCD_SPU_SRAM_PARA1);
+}
+
 /* The mode_config.mutex will be held for this call */
 static void armada_drm_crtc_disable(struct drm_crtc *crtc)
 {
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
 
        armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-       armada_drm_crtc_finish_fb(dcrtc, crtc->primary->fb, true);
-
-       /* Power down most RAMs and FIFOs */
-       writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
-                      CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
-                      CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
+       armada_drm_crtc_plane_disable(dcrtc, crtc->primary);
 }
 
 static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
@@ -920,8 +996,6 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
 {
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
        struct armada_frame_work *work;
-       struct drm_device *dev = crtc->dev;
-       unsigned long flags;
        unsigned i;
        int ret;
 
@@ -933,6 +1007,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
        if (!work)
                return -ENOMEM;
 
+       work->work.fn = armada_drm_crtc_complete_frame_work;
        work->event = event;
        work->old_fb = dcrtc->crtc.primary->fb;
 
@@ -966,12 +1041,8 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
         * Finally, if the display is blanked, we won't receive an
         * interrupt, so complete it now.
         */
-       if (dpms_blanked(dcrtc->dpms)) {
-               spin_lock_irqsave(&dev->event_lock, flags);
-               if (dcrtc->frame_work)
-                       armada_drm_crtc_complete_frame_work(dcrtc);
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-       }
+       if (dpms_blanked(dcrtc->dpms))
+               armada_drm_plane_work_run(dcrtc, drm_to_armada_plane(dcrtc->crtc.primary));
 
        return 0;
 }
@@ -1012,6 +1083,19 @@ static struct drm_crtc_funcs armada_crtc_funcs = {
        .set_property   = armada_drm_crtc_set_property,
 };
 
+static const struct drm_plane_funcs armada_primary_plane_funcs = {
+       .update_plane   = drm_primary_helper_update,
+       .disable_plane  = drm_primary_helper_disable,
+       .destroy        = drm_primary_helper_destroy,
+};
+
+int armada_drm_plane_init(struct armada_plane *plane)
+{
+       init_waitqueue_head(&plane->frame_wait);
+
+       return 0;
+}
+
 static struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
        { CSC_AUTO,        "Auto" },
        { CSC_YUV_CCIR601, "CCIR601" },
@@ -1044,12 +1128,13 @@ static int armada_drm_crtc_create_properties(struct drm_device *dev)
        return 0;
 }
 
-int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
+static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
        struct resource *res, int irq, const struct armada_variant *variant,
        struct device_node *port)
 {
        struct armada_private *priv = drm->dev_private;
        struct armada_crtc *dcrtc;
+       struct armada_plane *primary;
        void __iomem *base;
        int ret;
 
@@ -1080,8 +1165,6 @@ int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
        dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
        spin_lock_init(&dcrtc->irq_lock);
        dcrtc->irq_ena = CLEAN_SPU_IRQ_ISR;
-       INIT_LIST_HEAD(&dcrtc->vbl_list);
-       init_waitqueue_head(&dcrtc->frame_wait);
 
        /* Initialize some registers which we don't otherwise set */
        writel_relaxed(0x00000001, dcrtc->base + LCD_CFG_SCLK_DIV);
@@ -1118,7 +1201,32 @@ int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
        priv->dcrtc[dcrtc->num] = dcrtc;
 
        dcrtc->crtc.port = port;
-       drm_crtc_init(drm, &dcrtc->crtc, &armada_crtc_funcs);
+
+       primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+       if (!primary)
+               return -ENOMEM;
+
+       ret = armada_drm_plane_init(primary);
+       if (ret) {
+               kfree(primary);
+               return ret;
+       }
+
+       ret = drm_universal_plane_init(drm, &primary->base, 0,
+                                      &armada_primary_plane_funcs,
+                                      armada_primary_formats,
+                                      ARRAY_SIZE(armada_primary_formats),
+                                      DRM_PLANE_TYPE_PRIMARY);
+       if (ret) {
+               kfree(primary);
+               return ret;
+       }
+
+       ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
+                                       &armada_crtc_funcs);
+       if (ret)
+               goto err_crtc_init;
+
        drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
 
        drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
@@ -1127,6 +1235,10 @@ int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
                                   dcrtc->csc_rgb_mode);
 
        return armada_overlay_plane_create(drm, 1 << dcrtc->num);
+
+err_crtc_init:
+       primary->base.funcs->destroy(&primary->base);
+       return ret;
 }
 
 static int
index 98102a5a9af578c510dcec186b67dbf6f279007c..04fdd22d483bd8e56787b95bbfcfc8b490f52e91 100644 (file)
@@ -31,9 +31,30 @@ struct armada_regs {
 #define armada_reg_queue_end(_r, _i)           \
        armada_reg_queue_mod(_r, _i, 0, 0, ~0)
 
-struct armada_frame_work;
+struct armada_crtc;
+struct armada_plane;
 struct armada_variant;
 
+struct armada_plane_work {
+       void                    (*fn)(struct armada_crtc *,
+                                     struct armada_plane *,
+                                     struct armada_plane_work *);
+};
+
+struct armada_plane {
+       struct drm_plane        base;
+       wait_queue_head_t       frame_wait;
+       struct armada_plane_work *work;
+};
+#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
+
+int armada_drm_plane_init(struct armada_plane *plane);
+int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
+       struct armada_plane *plane, struct armada_plane_work *work);
+int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout);
+struct armada_plane_work *armada_drm_plane_work_cancel(
+       struct armada_crtc *dcrtc, struct armada_plane *plane);
+
 struct armada_crtc {
        struct drm_crtc         crtc;
        const struct armada_variant *variant;
@@ -66,25 +87,20 @@ struct armada_crtc {
        uint32_t                dumb_ctrl;
        uint32_t                spu_iopad_ctrl;
 
-       wait_queue_head_t       frame_wait;
-       struct armada_frame_work *frame_work;
-
        spinlock_t              irq_lock;
        uint32_t                irq_ena;
-       struct list_head        vbl_list;
 };
 #define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
 
-struct device_node;
-int armada_drm_crtc_create(struct drm_device *, struct device *,
-       struct resource *, int, const struct armada_variant *,
-       struct device_node *);
 void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
 void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
 void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
 void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
 void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
 
+void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
+       struct drm_plane *plane);
+
 extern struct platform_driver armada_lcd_platform_driver;
 
 #endif
index 5f6aef0dca59e5fbef928f19429c69b4351f6f63..4df6f2af2b21056854e961f703c6f524aeab0c4b 100644 (file)
@@ -37,22 +37,6 @@ static inline uint32_t armada_pitch(uint32_t width, uint32_t bpp)
        return ALIGN(pitch, 128);
 }
 
-struct armada_vbl_event {
-       struct list_head        node;
-       void                    *data;
-       void                    (*fn)(struct armada_crtc *, void *);
-};
-void armada_drm_vbl_event_add(struct armada_crtc *,
-       struct armada_vbl_event *);
-void armada_drm_vbl_event_remove(struct armada_crtc *,
-       struct armada_vbl_event *);
-#define armada_drm_vbl_event_init(_e, _f, _d) do {     \
-       struct armada_vbl_event *__e = _e;              \
-       INIT_LIST_HEAD(&__e->node);                     \
-       __e->data = _d;                                 \
-       __e->fn = _f;                                   \
-} while (0)
-
 
 struct armada_private;
 
index 225034b74cda7554cb6e8597c844dd77aa36558a..63d909e5f63b11e0f6d8d6603898696980fe5983 100644 (file)
 #include <drm/armada_drm.h>
 #include "armada_ioctlP.h"
 
-#ifdef CONFIG_DRM_ARMADA_TDA1998X
-#include <drm/i2c/tda998x.h>
-#include "armada_slave.h"
-
-static struct tda998x_encoder_params params = {
-       /* With 0x24, there is no translation between vp_out and int_vp
-       FB      LCD out Pins    VIP     Int Vp
-       R:23:16 R:7:0   VPC7:0  7:0     7:0[R]
-       G:15:8  G:15:8  VPB7:0  23:16   23:16[G]
-       B:7:0   B:23:16 VPA7:0  15:8    15:8[B]
-       */
-       .swap_a = 2,
-       .swap_b = 3,
-       .swap_c = 4,
-       .swap_d = 5,
-       .swap_e = 0,
-       .swap_f = 1,
-       .audio_cfg = BIT(2),
-       .audio_frame[1] = 1,
-       .audio_format = AFMT_SPDIF,
-       .audio_sample_rate = 44100,
-};
-
-static const struct armada_drm_slave_config tda19988_config = {
-       .i2c_adapter_id = 0,
-       .crtcs = 1 << 0, /* Only LCD0 at the moment */
-       .polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT,
-       .interlace_allowed = true,
-       .info = {
-               .type = "tda998x",
-               .addr = 0x70,
-               .platform_data = &params,
-       },
-};
-#endif
-
-static bool is_componentized(struct device *dev)
-{
-       return dev->of_node || dev->platform_data;
-}
-
 static void armada_drm_unref_work(struct work_struct *work)
 {
        struct armada_private *priv =
@@ -91,16 +50,11 @@ void armada_drm_queue_unref_work(struct drm_device *dev,
 
 static int armada_drm_load(struct drm_device *dev, unsigned long flags)
 {
-       const struct platform_device_id *id;
-       const struct armada_variant *variant;
        struct armada_private *priv;
-       struct resource *res[ARRAY_SIZE(priv->dcrtc)];
        struct resource *mem = NULL;
-       int ret, n, i;
-
-       memset(res, 0, sizeof(res));
+       int ret, n;
 
-       for (n = i = 0; ; n++) {
+       for (n = 0; ; n++) {
                struct resource *r = platform_get_resource(dev->platformdev,
                                                           IORESOURCE_MEM, n);
                if (!r)
@@ -109,8 +63,6 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
                /* Resources above 64K are graphics memory */
                if (resource_size(r) > SZ_64K)
                        mem = r;
-               else if (i < ARRAY_SIZE(priv->dcrtc))
-                       res[i++] = r;
                else
                        return -EINVAL;
        }
@@ -131,13 +83,6 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
        platform_set_drvdata(dev->platformdev, dev);
        dev->dev_private = priv;
 
-       /* Get the implementation specific driver data. */
-       id = platform_get_device_id(dev->platformdev);
-       if (!id)
-               return -ENXIO;
-
-       variant = (const struct armada_variant *)id->driver_data;
-
        INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
        INIT_KFIFO(priv->fb_unref);
 
@@ -157,34 +102,9 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
        dev->mode_config.funcs = &armada_drm_mode_config_funcs;
        drm_mm_init(&priv->linear, mem->start, resource_size(mem));
 
-       /* Create all LCD controllers */
-       for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
-               int irq;
-
-               if (!res[n])
-                       break;
-
-               irq = platform_get_irq(dev->platformdev, n);
-               if (irq < 0)
-                       goto err_kms;
-
-               ret = armada_drm_crtc_create(dev, dev->dev, res[n], irq,
-                                            variant, NULL);
-               if (ret)
-                       goto err_kms;
-       }
-
-       if (is_componentized(dev->dev)) {
-               ret = component_bind_all(dev->dev, dev);
-               if (ret)
-                       goto err_kms;
-       } else {
-#ifdef CONFIG_DRM_ARMADA_TDA1998X
-               ret = armada_drm_connector_slave_create(dev, &tda19988_config);
-               if (ret)
-                       goto err_kms;
-#endif
-       }
+       ret = component_bind_all(dev->dev, dev);
+       if (ret)
+               goto err_kms;
 
        ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
        if (ret)
@@ -202,8 +122,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
        return 0;
 
  err_comp:
-       if (is_componentized(dev->dev))
-               component_unbind_all(dev->dev, dev);
+       component_unbind_all(dev->dev, dev);
  err_kms:
        drm_mode_config_cleanup(dev);
        drm_mm_takedown(&priv->linear);
@@ -219,8 +138,7 @@ static int armada_drm_unload(struct drm_device *dev)
        drm_kms_helper_poll_fini(dev);
        armada_fbdev_fini(dev);
 
-       if (is_componentized(dev->dev))
-               component_unbind_all(dev->dev, dev);
+       component_unbind_all(dev->dev, dev);
 
        drm_mode_config_cleanup(dev);
        drm_mm_takedown(&priv->linear);
@@ -230,41 +148,18 @@ static int armada_drm_unload(struct drm_device *dev)
        return 0;
 }
 
-void armada_drm_vbl_event_add(struct armada_crtc *dcrtc,
-       struct armada_vbl_event *evt)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&dcrtc->irq_lock, flags);
-       if (list_empty(&evt->node)) {
-               list_add_tail(&evt->node, &dcrtc->vbl_list);
-
-               drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
-       }
-       spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
-}
-
-void armada_drm_vbl_event_remove(struct armada_crtc *dcrtc,
-       struct armada_vbl_event *evt)
-{
-       if (!list_empty(&evt->node)) {
-               list_del_init(&evt->node);
-               drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
-       }
-}
-
 /* These are called under the vbl_lock. */
-static int armada_drm_enable_vblank(struct drm_device *dev, int crtc)
+static int armada_drm_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct armada_private *priv = dev->dev_private;
-       armada_drm_crtc_enable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
+       armada_drm_crtc_enable_irq(priv->dcrtc[pipe], VSYNC_IRQ_ENA);
        return 0;
 }
 
-static void armada_drm_disable_vblank(struct drm_device *dev, int crtc)
+static void armada_drm_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct armada_private *priv = dev->dev_private;
-       armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
+       armada_drm_crtc_disable_irq(priv->dcrtc[pipe], VSYNC_IRQ_ENA);
 }
 
 static struct drm_ioctl_desc armada_ioctls[] = {
@@ -300,7 +195,7 @@ static struct drm_driver armada_drm_driver = {
        .lastclose              = armada_drm_lastclose,
        .unload                 = armada_drm_unload,
        .set_busid              = drm_platform_set_busid,
-       .get_vblank_counter     = drm_vblank_count,
+       .get_vblank_counter     = drm_vblank_no_hw_counter,
        .enable_vblank          = armada_drm_enable_vblank,
        .disable_vblank         = armada_drm_disable_vblank,
 #ifdef CONFIG_DEBUG_FS
@@ -435,37 +330,28 @@ static const struct component_master_ops armada_master_ops = {
 
 static int armada_drm_probe(struct platform_device *pdev)
 {
-       if (is_componentized(&pdev->dev)) {
-               struct component_match *match = NULL;
-               int ret;
-
-               ret = armada_drm_find_components(&pdev->dev, &match);
-               if (ret < 0)
-                       return ret;
-
-               return component_master_add_with_match(&pdev->dev,
-                               &armada_master_ops, match);
-       } else {
-               return drm_platform_init(&armada_drm_driver, pdev);
-       }
+       struct component_match *match = NULL;
+       int ret;
+
+       ret = armada_drm_find_components(&pdev->dev, &match);
+       if (ret < 0)
+               return ret;
+
+       return component_master_add_with_match(&pdev->dev, &armada_master_ops,
+                                              match);
 }
 
 static int armada_drm_remove(struct platform_device *pdev)
 {
-       if (is_componentized(&pdev->dev))
-               component_master_del(&pdev->dev, &armada_master_ops);
-       else
-               drm_put_dev(platform_get_drvdata(pdev));
+       component_master_del(&pdev->dev, &armada_master_ops);
        return 0;
 }
 
 static const struct platform_device_id armada_drm_platform_ids[] = {
        {
                .name           = "armada-drm",
-               .driver_data    = (unsigned long)&armada510_ops,
        }, {
                .name           = "armada-510-drm",
-               .driver_data    = (unsigned long)&armada510_ops,
        },
        { },
 };
diff --git a/drivers/gpu/drm/armada/armada_output.c b/drivers/gpu/drm/armada/armada_output.c
deleted file mode 100644 (file)
index 5a98231..0000000
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (C) 2012 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_encoder_slave.h>
-#include "armada_output.h"
-#include "armada_drm.h"
-
-struct armada_connector {
-       struct drm_connector conn;
-       const struct armada_output_type *type;
-};
-
-#define drm_to_armada_conn(c) container_of(c, struct armada_connector, conn)
-
-struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn)
-{
-       struct drm_encoder *enc = conn->encoder;
-
-       return enc ? enc : drm_encoder_find(conn->dev, conn->encoder_ids[0]);
-}
-
-static enum drm_connector_status armada_drm_connector_detect(
-       struct drm_connector *conn, bool force)
-{
-       struct armada_connector *dconn = drm_to_armada_conn(conn);
-       enum drm_connector_status status = connector_status_disconnected;
-
-       if (dconn->type->detect) {
-               status = dconn->type->detect(conn, force);
-       } else {
-               struct drm_encoder *enc = armada_drm_connector_encoder(conn);
-
-               if (enc)
-                       status = encoder_helper_funcs(enc)->detect(enc, conn);
-       }
-
-       return status;
-}
-
-static void armada_drm_connector_destroy(struct drm_connector *conn)
-{
-       struct armada_connector *dconn = drm_to_armada_conn(conn);
-
-       drm_connector_unregister(conn);
-       drm_connector_cleanup(conn);
-       kfree(dconn);
-}
-
-static int armada_drm_connector_set_property(struct drm_connector *conn,
-       struct drm_property *property, uint64_t value)
-{
-       struct armada_connector *dconn = drm_to_armada_conn(conn);
-
-       if (!dconn->type->set_property)
-               return -EINVAL;
-
-       return dconn->type->set_property(conn, property, value);
-}
-
-static const struct drm_connector_funcs armada_drm_conn_funcs = {
-       .dpms           = drm_helper_connector_dpms,
-       .fill_modes     = drm_helper_probe_single_connector_modes,
-       .detect         = armada_drm_connector_detect,
-       .destroy        = armada_drm_connector_destroy,
-       .set_property   = armada_drm_connector_set_property,
-};
-
-/* Shouldn't this be a generic helper function? */
-int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
-       struct drm_display_mode *mode)
-{
-       struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
-       int valid = MODE_BAD;
-
-       if (encoder) {
-               struct drm_encoder_slave *slave = to_encoder_slave(encoder);
-
-               valid = slave->slave_funcs->mode_valid(encoder, mode);
-       }
-       return valid;
-}
-
-int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
-       struct drm_property *property, uint64_t value)
-{
-       struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
-       int rc = -EINVAL;
-
-       if (encoder) {
-               struct drm_encoder_slave *slave = to_encoder_slave(encoder);
-
-               rc = slave->slave_funcs->set_property(encoder, conn, property,
-                                                     value);
-       }
-       return rc;
-}
-
-int armada_output_create(struct drm_device *dev,
-       const struct armada_output_type *type, const void *data)
-{
-       struct armada_connector *dconn;
-       int ret;
-
-       dconn = kzalloc(sizeof(*dconn), GFP_KERNEL);
-       if (!dconn)
-               return -ENOMEM;
-
-       dconn->type = type;
-
-       ret = drm_connector_init(dev, &dconn->conn, &armada_drm_conn_funcs,
-                                type->connector_type);
-       if (ret) {
-               DRM_ERROR("unable to init connector\n");
-               goto err_destroy_dconn;
-       }
-
-       ret = type->create(&dconn->conn, data);
-       if (ret)
-               goto err_conn;
-
-       ret = drm_connector_register(&dconn->conn);
-       if (ret)
-               goto err_sysfs;
-
-       return 0;
-
- err_sysfs:
-       if (dconn->conn.encoder)
-               dconn->conn.encoder->funcs->destroy(dconn->conn.encoder);
- err_conn:
-       drm_connector_cleanup(&dconn->conn);
- err_destroy_dconn:
-       kfree(dconn);
-       return ret;
-}
diff --git a/drivers/gpu/drm/armada/armada_output.h b/drivers/gpu/drm/armada/armada_output.h
deleted file mode 100644 (file)
index f448785..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2012 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef ARMADA_CONNETOR_H
-#define ARMADA_CONNETOR_H
-
-#define encoder_helper_funcs(encoder) \
-       ((const struct drm_encoder_helper_funcs *)encoder->helper_private)
-
-struct armada_output_type {
-       int connector_type;
-       enum drm_connector_status (*detect)(struct drm_connector *, bool);
-       int (*create)(struct drm_connector *, const void *);
-       int (*set_property)(struct drm_connector *, struct drm_property *,
-                           uint64_t);
-};
-
-struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn);
-
-int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
-       struct drm_display_mode *mode);
-
-int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
-       struct drm_property *property, uint64_t value);
-
-int armada_output_create(struct drm_device *dev,
-       const struct armada_output_type *type, const void *data);
-
-#endif
index e939faba7fcca8b0ff737008de124d3b7f96b3a5..5c22b380f8f3e48dd9c2f6d7d1346c2bfc740b48 100644 (file)
@@ -16,7 +16,7 @@
 #include <drm/armada_drm.h>
 #include "armada_ioctlP.h"
 
-struct armada_plane_properties {
+struct armada_ovl_plane_properties {
        uint32_t colorkey_yr;
        uint32_t colorkey_ug;
        uint32_t colorkey_vb;
@@ -29,26 +29,25 @@ struct armada_plane_properties {
        uint32_t colorkey_mode;
 };
 
-struct armada_plane {
-       struct drm_plane base;
-       spinlock_t lock;
+struct armada_ovl_plane {
+       struct armada_plane base;
        struct drm_framebuffer *old_fb;
        uint32_t src_hw;
        uint32_t dst_hw;
        uint32_t dst_yx;
        uint32_t ctrl0;
        struct {
-               struct armada_vbl_event update;
+               struct armada_plane_work work;
                struct armada_regs regs[13];
-               wait_queue_head_t wait;
        } vbl;
-       struct armada_plane_properties prop;
+       struct armada_ovl_plane_properties prop;
 };
-#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
+#define drm_to_armada_ovl_plane(p) \
+       container_of(p, struct armada_ovl_plane, base.base)
 
 
 static void
-armada_ovl_update_attr(struct armada_plane_properties *prop,
+armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
        struct armada_crtc *dcrtc)
 {
        writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);
@@ -71,32 +70,34 @@ armada_ovl_update_attr(struct armada_plane_properties *prop,
        spin_unlock_irq(&dcrtc->irq_lock);
 }
 
-/* === Plane support === */
-static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
+static void armada_ovl_retire_fb(struct armada_ovl_plane *dplane,
+       struct drm_framebuffer *fb)
 {
-       struct armada_plane *dplane = data;
-       struct drm_framebuffer *fb;
+       struct drm_framebuffer *old_fb;
 
-       armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
+       old_fb = xchg(&dplane->old_fb, fb);
 
-       spin_lock(&dplane->lock);
-       fb = dplane->old_fb;
-       dplane->old_fb = NULL;
-       spin_unlock(&dplane->lock);
+       if (old_fb)
+               armada_drm_queue_unref_work(dplane->base.base.dev, old_fb);
+}
 
-       if (fb)
-               armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
+/* === Plane support === */
+static void armada_ovl_plane_work(struct armada_crtc *dcrtc,
+       struct armada_plane *plane, struct armada_plane_work *work)
+{
+       struct armada_ovl_plane *dplane = container_of(plane, struct armada_ovl_plane, base);
 
-       wake_up(&dplane->vbl.wait);
+       armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
+       armada_ovl_retire_fb(dplane, NULL);
 }
 
 static int
-armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
        struct drm_framebuffer *fb,
        int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
        uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h)
 {
-       struct armada_plane *dplane = drm_to_armada_plane(plane);
+       struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
        struct drm_rect src = {
                .x1 = src_x,
@@ -160,9 +161,8 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
                               dcrtc->base + LCD_SPU_SRAM_PARA1);
        }
 
-       wait_event_timeout(dplane->vbl.wait,
-                          list_empty(&dplane->vbl.update.node),
-                          HZ/25);
+       if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
+               armada_drm_plane_work_cancel(dcrtc, &dplane->base);
 
        if (plane->fb != fb) {
                struct armada_gem_object *obj = drm_fb_obj(fb);
@@ -175,17 +175,8 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
                 */
                drm_framebuffer_reference(fb);
 
-               if (plane->fb) {
-                       struct drm_framebuffer *older_fb;
-
-                       spin_lock_irq(&dplane->lock);
-                       older_fb = dplane->old_fb;
-                       dplane->old_fb = plane->fb;
-                       spin_unlock_irq(&dplane->lock);
-                       if (older_fb)
-                               armada_drm_queue_unref_work(dcrtc->crtc.dev,
-                                                           older_fb);
-               }
+               if (plane->fb)
+                       armada_ovl_retire_fb(dplane, plane->fb);
 
                src_y = src.y1 >> 16;
                src_x = src.x1 >> 16;
@@ -262,60 +253,50 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
        }
        if (idx) {
                armada_reg_queue_end(dplane->vbl.regs, idx);
-               armada_drm_vbl_event_add(dcrtc, &dplane->vbl.update);
+               armada_drm_plane_work_queue(dcrtc, &dplane->base,
+                                           &dplane->vbl.work);
        }
        return 0;
 }
 
-static int armada_plane_disable(struct drm_plane *plane)
+static int armada_ovl_plane_disable(struct drm_plane *plane)
 {
-       struct armada_plane *dplane = drm_to_armada_plane(plane);
+       struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
        struct drm_framebuffer *fb;
        struct armada_crtc *dcrtc;
 
-       if (!dplane->base.crtc)
+       if (!dplane->base.base.crtc)
                return 0;
 
-       dcrtc = drm_to_armada_crtc(dplane->base.crtc);
-       dcrtc->plane = NULL;
-
-       spin_lock_irq(&dcrtc->irq_lock);
-       armada_drm_vbl_event_remove(dcrtc, &dplane->vbl.update);
-       armada_updatel(0, CFG_DMA_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
-       dplane->ctrl0 = 0;
-       spin_unlock_irq(&dcrtc->irq_lock);
+       dcrtc = drm_to_armada_crtc(dplane->base.base.crtc);
 
-       /* Power down the Y/U/V FIFOs */
-       armada_updatel(CFG_PDWN16x66 | CFG_PDWN32x66, 0,
-                      dcrtc->base + LCD_SPU_SRAM_PARA1);
+       armada_drm_plane_work_cancel(dcrtc, &dplane->base);
+       armada_drm_crtc_plane_disable(dcrtc, plane);
 
-       if (plane->fb)
-               drm_framebuffer_unreference(plane->fb);
+       dcrtc->plane = NULL;
+       dplane->ctrl0 = 0;
 
-       spin_lock_irq(&dplane->lock);
-       fb = dplane->old_fb;
-       dplane->old_fb = NULL;
-       spin_unlock_irq(&dplane->lock);
+       fb = xchg(&dplane->old_fb, NULL);
        if (fb)
                drm_framebuffer_unreference(fb);
 
        return 0;
 }
 
-static void armada_plane_destroy(struct drm_plane *plane)
+static void armada_ovl_plane_destroy(struct drm_plane *plane)
 {
-       struct armada_plane *dplane = drm_to_armada_plane(plane);
+       struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
 
        drm_plane_cleanup(plane);
 
        kfree(dplane);
 }
 
-static int armada_plane_set_property(struct drm_plane *plane,
+static int armada_ovl_plane_set_property(struct drm_plane *plane,
        struct drm_property *property, uint64_t val)
 {
        struct armada_private *priv = plane->dev->dev_private;
-       struct armada_plane *dplane = drm_to_armada_plane(plane);
+       struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
        bool update_attr = false;
 
        if (property == priv->colorkey_prop) {
@@ -372,21 +353,21 @@ static int armada_plane_set_property(struct drm_plane *plane,
                update_attr = true;
        }
 
-       if (update_attr && dplane->base.crtc)
+       if (update_attr && dplane->base.base.crtc)
                armada_ovl_update_attr(&dplane->prop,
-                                      drm_to_armada_crtc(dplane->base.crtc));
+                                      drm_to_armada_crtc(dplane->base.base.crtc));
 
        return 0;
 }
 
-static const struct drm_plane_funcs armada_plane_funcs = {
-       .update_plane   = armada_plane_update,
-       .disable_plane  = armada_plane_disable,
-       .destroy        = armada_plane_destroy,
-       .set_property   = armada_plane_set_property,
+static const struct drm_plane_funcs armada_ovl_plane_funcs = {
+       .update_plane   = armada_ovl_plane_update,
+       .disable_plane  = armada_ovl_plane_disable,
+       .destroy        = armada_ovl_plane_destroy,
+       .set_property   = armada_ovl_plane_set_property,
 };
 
-static const uint32_t armada_formats[] = {
+static const uint32_t armada_ovl_formats[] = {
        DRM_FORMAT_UYVY,
        DRM_FORMAT_YUYV,
        DRM_FORMAT_YUV420,
@@ -456,7 +437,7 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
 {
        struct armada_private *priv = dev->dev_private;
        struct drm_mode_object *mobj;
-       struct armada_plane *dplane;
+       struct armada_ovl_plane *dplane;
        int ret;
 
        ret = armada_overlay_create_properties(dev);
@@ -467,13 +448,23 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
        if (!dplane)
                return -ENOMEM;
 
-       spin_lock_init(&dplane->lock);
-       init_waitqueue_head(&dplane->vbl.wait);
-       armada_drm_vbl_event_init(&dplane->vbl.update, armada_plane_vbl,
-                                 dplane);
+       ret = armada_drm_plane_init(&dplane->base);
+       if (ret) {
+               kfree(dplane);
+               return ret;
+       }
+
+       dplane->vbl.work.fn = armada_ovl_plane_work;
 
-       drm_plane_init(dev, &dplane->base, crtcs, &armada_plane_funcs,
-                      armada_formats, ARRAY_SIZE(armada_formats), false);
+       ret = drm_universal_plane_init(dev, &dplane->base.base, crtcs,
+                                      &armada_ovl_plane_funcs,
+                                      armada_ovl_formats,
+                                      ARRAY_SIZE(armada_ovl_formats),
+                                      DRM_PLANE_TYPE_OVERLAY);
+       if (ret) {
+               kfree(dplane);
+               return ret;
+       }
 
        dplane->prop.colorkey_yr = 0xfefefe00;
        dplane->prop.colorkey_ug = 0x01010100;
@@ -483,7 +474,7 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
        dplane->prop.contrast = 0x4000;
        dplane->prop.saturation = 0x4000;
 
-       mobj = &dplane->base.base;
+       mobj = &dplane->base.base.base;
        drm_object_attach_property(mobj, priv->colorkey_prop,
                                   0x0101fe);
        drm_object_attach_property(mobj, priv->colorkey_min_prop,
diff --git a/drivers/gpu/drm/armada/armada_slave.c b/drivers/gpu/drm/armada/armada_slave.c
deleted file mode 100644 (file)
index 00d0fac..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (C) 2012 Russell King
- *  Rewritten from the dovefb driver, and Armada510 manuals.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_encoder_slave.h>
-#include "armada_drm.h"
-#include "armada_output.h"
-#include "armada_slave.h"
-
-static int armada_drm_slave_get_modes(struct drm_connector *conn)
-{
-       struct drm_encoder *enc = armada_drm_connector_encoder(conn);
-       int count = 0;
-
-       if (enc) {
-               struct drm_encoder_slave *slave = to_encoder_slave(enc);
-
-               count = slave->slave_funcs->get_modes(enc, conn);
-       }
-
-       return count;
-}
-
-static void armada_drm_slave_destroy(struct drm_encoder *enc)
-{
-       struct drm_encoder_slave *slave = to_encoder_slave(enc);
-       struct i2c_client *client = drm_i2c_encoder_get_client(enc);
-
-       if (slave->slave_funcs)
-               slave->slave_funcs->destroy(enc);
-       if (client)
-               i2c_put_adapter(client->adapter);
-
-       drm_encoder_cleanup(&slave->base);
-       kfree(slave);
-}
-
-static const struct drm_encoder_funcs armada_drm_slave_encoder_funcs = {
-       .destroy        = armada_drm_slave_destroy,
-};
-
-static const struct drm_connector_helper_funcs armada_drm_slave_helper_funcs = {
-       .get_modes      = armada_drm_slave_get_modes,
-       .mode_valid     = armada_drm_slave_encoder_mode_valid,
-       .best_encoder   = armada_drm_connector_encoder,
-};
-
-static const struct drm_encoder_helper_funcs drm_slave_encoder_helpers = {
-       .dpms = drm_i2c_encoder_dpms,
-       .save = drm_i2c_encoder_save,
-       .restore = drm_i2c_encoder_restore,
-       .mode_fixup = drm_i2c_encoder_mode_fixup,
-       .prepare = drm_i2c_encoder_prepare,
-       .commit = drm_i2c_encoder_commit,
-       .mode_set = drm_i2c_encoder_mode_set,
-       .detect = drm_i2c_encoder_detect,
-};
-
-static int
-armada_drm_conn_slave_create(struct drm_connector *conn, const void *data)
-{
-       const struct armada_drm_slave_config *config = data;
-       struct drm_encoder_slave *slave;
-       struct i2c_adapter *adap;
-       int ret;
-
-       conn->interlace_allowed = config->interlace_allowed;
-       conn->doublescan_allowed = config->doublescan_allowed;
-       conn->polled = config->polled;
-
-       drm_connector_helper_add(conn, &armada_drm_slave_helper_funcs);
-
-       slave = kzalloc(sizeof(*slave), GFP_KERNEL);
-       if (!slave)
-               return -ENOMEM;
-
-       slave->base.possible_crtcs = config->crtcs;
-
-       adap = i2c_get_adapter(config->i2c_adapter_id);
-       if (!adap) {
-               kfree(slave);
-               return -EPROBE_DEFER;
-       }
-
-       ret = drm_encoder_init(conn->dev, &slave->base,
-                              &armada_drm_slave_encoder_funcs,
-                              DRM_MODE_ENCODER_TMDS);
-       if (ret) {
-               DRM_ERROR("unable to init encoder\n");
-               i2c_put_adapter(adap);
-               kfree(slave);
-               return ret;
-       }
-
-       ret = drm_i2c_encoder_init(conn->dev, slave, adap, &config->info);
-       i2c_put_adapter(adap);
-       if (ret) {
-               DRM_ERROR("unable to init encoder slave\n");
-               armada_drm_slave_destroy(&slave->base);
-               return ret;
-       }
-
-       drm_encoder_helper_add(&slave->base, &drm_slave_encoder_helpers);
-
-       ret = slave->slave_funcs->create_resources(&slave->base, conn);
-       if (ret) {
-               armada_drm_slave_destroy(&slave->base);
-               return ret;
-       }
-
-       ret = drm_mode_connector_attach_encoder(conn, &slave->base);
-       if (ret) {
-               armada_drm_slave_destroy(&slave->base);
-               return ret;
-       }
-
-       conn->encoder = &slave->base;
-
-       return ret;
-}
-
-static const struct armada_output_type armada_drm_conn_slave = {
-       .connector_type = DRM_MODE_CONNECTOR_HDMIA,
-       .create         = armada_drm_conn_slave_create,
-       .set_property   = armada_drm_slave_encoder_set_property,
-};
-
-int armada_drm_connector_slave_create(struct drm_device *dev,
-       const struct armada_drm_slave_config *config)
-{
-       return armada_output_create(dev, &armada_drm_conn_slave, config);
-}
diff --git a/drivers/gpu/drm/armada/armada_slave.h b/drivers/gpu/drm/armada/armada_slave.h
deleted file mode 100644 (file)
index bf2374c..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2012 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef ARMADA_SLAVE_H
-#define ARMADA_SLAVE_H
-
-#include <linux/i2c.h>
-#include <drm/drmP.h>
-
-struct armada_drm_slave_config {
-       int i2c_adapter_id;
-       uint32_t crtcs;
-       uint8_t polled;
-       bool interlace_allowed;
-       bool doublescan_allowed;
-       struct i2c_board_info info;
-};
-
-int armada_drm_connector_slave_create(struct drm_device *dev,
-       const struct armada_drm_slave_config *);
-
-#endif
index 8bc62ec407f9a928b4655e4b286ca4a2a7a82d97..244df0a440b730a71ba0d21c3f746569b4b81b36 100644 (file)
@@ -656,7 +656,8 @@ static void atmel_hlcdc_dc_irq_uninstall(struct drm_device *dev)
        regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_ISR, &isr);
 }
 
-static int atmel_hlcdc_dc_enable_vblank(struct drm_device *dev, int crtc)
+static int atmel_hlcdc_dc_enable_vblank(struct drm_device *dev,
+                                       unsigned int pipe)
 {
        struct atmel_hlcdc_dc *dc = dev->dev_private;
 
@@ -666,7 +667,8 @@ static int atmel_hlcdc_dc_enable_vblank(struct drm_device *dev, int crtc)
        return 0;
 }
 
-static void atmel_hlcdc_dc_disable_vblank(struct drm_device *dev, int crtc)
+static void atmel_hlcdc_dc_disable_vblank(struct drm_device *dev,
+                                         unsigned int pipe)
 {
        struct atmel_hlcdc_dc *dc = dev->dev_private;
 
@@ -697,7 +699,7 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
        .irq_preinstall = atmel_hlcdc_dc_irq_uninstall,
        .irq_postinstall = atmel_hlcdc_dc_irq_postinstall,
        .irq_uninstall = atmel_hlcdc_dc_irq_uninstall,
-       .get_vblank_counter = drm_vblank_count,
+       .get_vblank_counter = drm_vblank_no_hw_counter,
        .enable_vblank = atmel_hlcdc_dc_enable_vblank,
        .disable_vblank = atmel_hlcdc_dc_disable_vblank,
        .gem_free_object = drm_gem_cma_free_object,
index be9fa8220499cf786e27a7ddfb449f2e94b322f2..d0299aed517e61039cd4b0b93897b21f7c13390c 100644 (file)
@@ -633,7 +633,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
                if (!state->bpp[i])
                        return -EINVAL;
 
-               switch (state->base.rotation & 0xf) {
+               switch (state->base.rotation & DRM_ROTATE_MASK) {
                case BIT(DRM_ROTATE_90):
                        offset = ((y_offset + state->src_y + patched_src_w - 1) /
                                  ydiv) * fb->pitches[i];
@@ -712,11 +712,13 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
 }
 
 static int atmel_hlcdc_plane_prepare_fb(struct drm_plane *p,
-                                       struct drm_framebuffer *fb,
                                        const struct drm_plane_state *new_state)
 {
        struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
 
+       if (!new_state->fb)
+               return 0;
+
        return atmel_hlcdc_layer_update_start(&plane->layer);
 }
 
index 2de52a53a80335a859547956e904851b9249c00c..6dddd392aa42f119ff572993ac887eb3a3a277b6 100644 (file)
@@ -11,6 +11,18 @@ config DRM_DW_HDMI
        tristate
        select DRM_KMS_HELPER
 
+config DRM_DW_HDMI_AHB_AUDIO
+       tristate "Synopsis Designware AHB Audio interface"
+       depends on DRM_DW_HDMI && SND
+       select SND_PCM
+       select SND_PCM_ELD
+       select SND_PCM_IEC958
+       help
+         Support the AHB Audio interface which is part of the Synopsis
+         Designware HDMI block.  This is used in conjunction with
+         the i.MX6 HDMI driver.
+
+
 config DRM_NXP_PTN3460
        tristate "NXP PTN3460 DP/LVDS bridge"
        depends on OF
index e2eef1c2f4c3e0e48f8f28d1834b91dc77d41326..d4e28beec30eb7571caa68cd55454c3888f11ea5 100644 (file)
@@ -1,5 +1,6 @@
 ccflags-y := -Iinclude/drm
 
 obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o
+obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw_hdmi-ahb-audio.o
 obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
 obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
diff --git a/drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/dw_hdmi-ahb-audio.c
new file mode 100644 (file)
index 0000000..59f630f
--- /dev/null
@@ -0,0 +1,653 @@
+/*
+ * DesignWare HDMI audio driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written and tested against the Designware HDMI Tx found in iMX6.
+ */
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <drm/bridge/dw_hdmi.h>
+#include <drm/drm_edid.h>
+
+#include <sound/asoundef.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/pcm_drm_eld.h>
+#include <sound/pcm_iec958.h>
+
+#include "dw_hdmi-audio.h"
+
+#define DRIVER_NAME "dw-hdmi-ahb-audio"
+
+/* Provide some bits rather than bit offsets */
+enum {
+       HDMI_AHB_DMA_CONF0_SW_FIFO_RST = BIT(7),
+       HDMI_AHB_DMA_CONF0_EN_HLOCK = BIT(3),
+       HDMI_AHB_DMA_START_START = BIT(0),
+       HDMI_AHB_DMA_STOP_STOP = BIT(0),
+       HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR = BIT(5),
+       HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST = BIT(4),
+       HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY = BIT(3),
+       HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE = BIT(2),
+       HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL = BIT(1),
+       HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY = BIT(0),
+       HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL =
+               HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR |
+               HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST |
+               HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY |
+               HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE |
+               HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL |
+               HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY,
+       HDMI_IH_AHBDMAAUD_STAT0_ERROR = BIT(5),
+       HDMI_IH_AHBDMAAUD_STAT0_LOST = BIT(4),
+       HDMI_IH_AHBDMAAUD_STAT0_RETRY = BIT(3),
+       HDMI_IH_AHBDMAAUD_STAT0_DONE = BIT(2),
+       HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL = BIT(1),
+       HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY = BIT(0),
+       HDMI_IH_AHBDMAAUD_STAT0_ALL =
+               HDMI_IH_AHBDMAAUD_STAT0_ERROR |
+               HDMI_IH_AHBDMAAUD_STAT0_LOST |
+               HDMI_IH_AHBDMAAUD_STAT0_RETRY |
+               HDMI_IH_AHBDMAAUD_STAT0_DONE |
+               HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL |
+               HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY,
+       HDMI_AHB_DMA_CONF0_INCR16 = 2 << 1,
+       HDMI_AHB_DMA_CONF0_INCR8 = 1 << 1,
+       HDMI_AHB_DMA_CONF0_INCR4 = 0,
+       HDMI_AHB_DMA_CONF0_BURST_MODE = BIT(0),
+       HDMI_AHB_DMA_MASK_DONE = BIT(7),
+
+       HDMI_REVISION_ID = 0x0001,
+       HDMI_IH_AHBDMAAUD_STAT0 = 0x0109,
+       HDMI_IH_MUTE_AHBDMAAUD_STAT0 = 0x0189,
+       HDMI_FC_AUDICONF2 = 0x1027,
+       HDMI_FC_AUDSCONF = 0x1063,
+       HDMI_FC_AUDSCONF_LAYOUT1 = 1 << 0,
+       HDMI_FC_AUDSCONF_LAYOUT0 = 0 << 0,
+       HDMI_AHB_DMA_CONF0 = 0x3600,
+       HDMI_AHB_DMA_START = 0x3601,
+       HDMI_AHB_DMA_STOP = 0x3602,
+       HDMI_AHB_DMA_THRSLD = 0x3603,
+       HDMI_AHB_DMA_STRADDR0 = 0x3604,
+       HDMI_AHB_DMA_STPADDR0 = 0x3608,
+       HDMI_AHB_DMA_MASK = 0x3614,
+       HDMI_AHB_DMA_POL = 0x3615,
+       HDMI_AHB_DMA_CONF1 = 0x3616,
+       HDMI_AHB_DMA_BUFFPOL = 0x361a,
+};
+
+struct dw_hdmi_channel_conf {
+       u8 conf1;
+       u8 ca;
+};
+
+/*
+ * The default mapping of ALSA channels to HDMI channels and speaker
+ * allocation bits.  Note that we can't do channel remapping here -
+ * channels must be in the same order.
+ *
+ * Mappings for alsa-lib pcm/surround*.conf files:
+ *
+ *             Front   Sur4.0  Sur4.1  Sur5.0  Sur5.1  Sur7.1
+ * Channels    2       4       6       6       6       8
+ *
+ * Our mapping from ALSA channel to CEA686D speaker name and HDMI channel:
+ *
+ *                             Number of ALSA channels
+ * ALSA Channel        2       3       4       5       6       7       8
+ * 0           FL:0    =       =       =       =       =       =
+ * 1           FR:1    =       =       =       =       =       =
+ * 2                   FC:3    RL:4    LFE:2   =       =       =
+ * 3                           RR:5    RL:4    FC:3    =       =
+ * 4                                   RR:5    RL:4    =       =
+ * 5                                           RR:5    =       =
+ * 6                                                   RC:6    =
+ * 7                                                   RLC/FRC RLC/FRC
+ */
+static struct dw_hdmi_channel_conf default_hdmi_channel_config[7] = {
+       { 0x03, 0x00 }, /* FL,FR */
+       { 0x0b, 0x02 }, /* FL,FR,FC */
+       { 0x33, 0x08 }, /* FL,FR,RL,RR */
+       { 0x37, 0x09 }, /* FL,FR,LFE,RL,RR */
+       { 0x3f, 0x0b }, /* FL,FR,LFE,FC,RL,RR */
+       { 0x7f, 0x0f }, /* FL,FR,LFE,FC,RL,RR,RC */
+       { 0xff, 0x13 }, /* FL,FR,LFE,FC,RL,RR,[FR]RC,[FR]LC */
+};
+
+struct snd_dw_hdmi {
+       struct snd_card *card;
+       struct snd_pcm *pcm;
+       spinlock_t lock;
+       struct dw_hdmi_audio_data data;
+       struct snd_pcm_substream *substream;
+       void (*reformat)(struct snd_dw_hdmi *, size_t, size_t);
+       void *buf_src;
+       void *buf_dst;
+       dma_addr_t buf_addr;
+       unsigned buf_offset;
+       unsigned buf_period;
+       unsigned buf_size;
+       unsigned channels;
+       u8 revision;
+       u8 iec_offset;
+       u8 cs[192][8];
+};
+
+static void dw_hdmi_writel(u32 val, void __iomem *ptr)
+{
+       writeb_relaxed(val, ptr);
+       writeb_relaxed(val >> 8, ptr + 1);
+       writeb_relaxed(val >> 16, ptr + 2);
+       writeb_relaxed(val >> 24, ptr + 3);
+}
+
+/*
+ * Convert to hardware format: The userspace buffer contains IEC958 samples,
+ * with the PCUV bits in bits 31..28 and audio samples in bits 27..4.  We
+ * need these to be in bits 27..24, with the IEC B bit in bit 28, and audio
+ * samples in 23..0.
+ *
+ * Default preamble in bits 3..0: 8 = block start, 4 = even 2 = odd
+ *
+ * Ideally, we could do with having the data properly formatted in userspace.
+ */
+static void dw_hdmi_reformat_iec958(struct snd_dw_hdmi *dw,
+       size_t offset, size_t bytes)
+{
+       u32 *src = dw->buf_src + offset;
+       u32 *dst = dw->buf_dst + offset;
+       u32 *end = dw->buf_src + offset + bytes;
+
+       do {
+               u32 b, sample = *src++;
+
+               b = (sample & 8) << (28 - 3);
+
+               sample >>= 4;
+
+               *dst++ = sample | b;
+       } while (src < end);
+}
+
+static u32 parity(u32 sample)
+{
+       sample ^= sample >> 16;
+       sample ^= sample >> 8;
+       sample ^= sample >> 4;
+       sample ^= sample >> 2;
+       sample ^= sample >> 1;
+       return (sample & 1) << 27;
+}
+
+static void dw_hdmi_reformat_s24(struct snd_dw_hdmi *dw,
+       size_t offset, size_t bytes)
+{
+       u32 *src = dw->buf_src + offset;
+       u32 *dst = dw->buf_dst + offset;
+       u32 *end = dw->buf_src + offset + bytes;
+
+       do {
+               unsigned i;
+               u8 *cs;
+
+               cs = dw->cs[dw->iec_offset++];
+               if (dw->iec_offset >= 192)
+                       dw->iec_offset = 0;
+
+               i = dw->channels;
+               do {
+                       u32 sample = *src++;
+
+                       sample &= ~0xff000000;
+                       sample |= *cs++ << 24;
+                       sample |= parity(sample & ~0xf8000000);
+
+                       *dst++ = sample;
+               } while (--i);
+       } while (src < end);
+}
+
+static void dw_hdmi_create_cs(struct snd_dw_hdmi *dw,
+       struct snd_pcm_runtime *runtime)
+{
+       u8 cs[4];
+       unsigned ch, i, j;
+
+       snd_pcm_create_iec958_consumer(runtime, cs, sizeof(cs));
+
+       memset(dw->cs, 0, sizeof(dw->cs));
+
+       for (ch = 0; ch < 8; ch++) {
+               cs[2] &= ~IEC958_AES2_CON_CHANNEL;
+               cs[2] |= (ch + 1) << 4;
+
+               for (i = 0; i < ARRAY_SIZE(cs); i++) {
+                       unsigned c = cs[i];
+
+                       for (j = 0; j < 8; j++, c >>= 1)
+                               dw->cs[i * 8 + j][ch] = (c & 1) << 2;
+               }
+       }
+       dw->cs[0][0] |= BIT(4);
+}
+
+static void dw_hdmi_start_dma(struct snd_dw_hdmi *dw)
+{
+       void __iomem *base = dw->data.base;
+       unsigned offset = dw->buf_offset;
+       unsigned period = dw->buf_period;
+       u32 start, stop;
+
+       dw->reformat(dw, offset, period);
+
+       /* Clear all irqs before enabling irqs and starting DMA */
+       writeb_relaxed(HDMI_IH_AHBDMAAUD_STAT0_ALL,
+                      base + HDMI_IH_AHBDMAAUD_STAT0);
+
+       start = dw->buf_addr + offset;
+       stop = start + period - 1;
+
+       /* Setup the hardware start/stop addresses */
+       dw_hdmi_writel(start, base + HDMI_AHB_DMA_STRADDR0);
+       dw_hdmi_writel(stop, base + HDMI_AHB_DMA_STPADDR0);
+
+       writeb_relaxed((u8)~HDMI_AHB_DMA_MASK_DONE, base + HDMI_AHB_DMA_MASK);
+       writeb(HDMI_AHB_DMA_START_START, base + HDMI_AHB_DMA_START);
+
+       offset += period;
+       if (offset >= dw->buf_size)
+               offset = 0;
+       dw->buf_offset = offset;
+}
+
+static void dw_hdmi_stop_dma(struct snd_dw_hdmi *dw)
+{
+       /* Disable interrupts before disabling DMA */
+       writeb_relaxed(~0, dw->data.base + HDMI_AHB_DMA_MASK);
+       writeb_relaxed(HDMI_AHB_DMA_STOP_STOP, dw->data.base + HDMI_AHB_DMA_STOP);
+}
+
+static irqreturn_t snd_dw_hdmi_irq(int irq, void *data)
+{
+       struct snd_dw_hdmi *dw = data;
+       struct snd_pcm_substream *substream;
+       unsigned stat;
+
+       stat = readb_relaxed(dw->data.base + HDMI_IH_AHBDMAAUD_STAT0);
+       if (!stat)
+               return IRQ_NONE;
+
+       writeb_relaxed(stat, dw->data.base + HDMI_IH_AHBDMAAUD_STAT0);
+
+       substream = dw->substream;
+       if (stat & HDMI_IH_AHBDMAAUD_STAT0_DONE && substream) {
+               snd_pcm_period_elapsed(substream);
+
+               spin_lock(&dw->lock);
+               if (dw->substream)
+                       dw_hdmi_start_dma(dw);
+               spin_unlock(&dw->lock);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static struct snd_pcm_hardware dw_hdmi_hw = {
+       .info = SNDRV_PCM_INFO_INTERLEAVED |
+               SNDRV_PCM_INFO_BLOCK_TRANSFER |
+               SNDRV_PCM_INFO_MMAP |
+               SNDRV_PCM_INFO_MMAP_VALID,
+       .formats = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE |
+                  SNDRV_PCM_FMTBIT_S24_LE,
+       .rates = SNDRV_PCM_RATE_32000 |
+                SNDRV_PCM_RATE_44100 |
+                SNDRV_PCM_RATE_48000 |
+                SNDRV_PCM_RATE_88200 |
+                SNDRV_PCM_RATE_96000 |
+                SNDRV_PCM_RATE_176400 |
+                SNDRV_PCM_RATE_192000,
+       .channels_min = 2,
+       .channels_max = 8,
+       .buffer_bytes_max = 1024 * 1024,
+       .period_bytes_min = 256,
+       .period_bytes_max = 8192,       /* ERR004323: must limit to 8k */
+       .periods_min = 2,
+       .periods_max = 16,
+       .fifo_size = 0,
+};
+
+static int dw_hdmi_open(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct snd_dw_hdmi *dw = substream->private_data;
+       void __iomem *base = dw->data.base;
+       int ret;
+
+       runtime->hw = dw_hdmi_hw;
+
+       ret = snd_pcm_hw_constraint_eld(runtime, dw->data.eld);
+       if (ret < 0)
+               return ret;
+
+       ret = snd_pcm_limit_hw_rates(runtime);
+       if (ret < 0)
+               return ret;
+
+       ret = snd_pcm_hw_constraint_integer(runtime,
+                                           SNDRV_PCM_HW_PARAM_PERIODS);
+       if (ret < 0)
+               return ret;
+
+       /* Limit the buffer size to the size of the preallocated buffer */
+       ret = snd_pcm_hw_constraint_minmax(runtime,
+                                          SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+                                          0, substream->dma_buffer.bytes);
+       if (ret < 0)
+               return ret;
+
+       /* Clear FIFO */
+       writeb_relaxed(HDMI_AHB_DMA_CONF0_SW_FIFO_RST,
+                      base + HDMI_AHB_DMA_CONF0);
+
+       /* Configure interrupt polarities */
+       writeb_relaxed(~0, base + HDMI_AHB_DMA_POL);
+       writeb_relaxed(~0, base + HDMI_AHB_DMA_BUFFPOL);
+
+       /* Keep interrupts masked, and clear any pending */
+       writeb_relaxed(~0, base + HDMI_AHB_DMA_MASK);
+       writeb_relaxed(~0, base + HDMI_IH_AHBDMAAUD_STAT0);
+
+       ret = request_irq(dw->data.irq, snd_dw_hdmi_irq, IRQF_SHARED,
+                         "dw-hdmi-audio", dw);
+       if (ret)
+               return ret;
+
+       /* Un-mute done interrupt */
+       writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL &
+                      ~HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE,
+                      base + HDMI_IH_MUTE_AHBDMAAUD_STAT0);
+
+       return 0;
+}
+
+static int dw_hdmi_close(struct snd_pcm_substream *substream)
+{
+       struct snd_dw_hdmi *dw = substream->private_data;
+
+       /* Mute all interrupts */
+       writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL,
+                      dw->data.base + HDMI_IH_MUTE_AHBDMAAUD_STAT0);
+
+       free_irq(dw->data.irq, dw);
+
+       return 0;
+}
+
+static int dw_hdmi_hw_free(struct snd_pcm_substream *substream)
+{
+       return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int dw_hdmi_hw_params(struct snd_pcm_substream *substream,
+       struct snd_pcm_hw_params *params)
+{
+       /* Allocate the PCM runtime buffer, which is exposed to userspace. */
+       return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+                                               params_buffer_bytes(params));
+}
+
+static int dw_hdmi_prepare(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct snd_dw_hdmi *dw = substream->private_data;
+       u8 threshold, conf0, conf1, layout, ca;
+
+       /* Setup as per 3.0.5 FSL 4.1.0 BSP */
+       switch (dw->revision) {
+       case 0x0a:
+               conf0 = HDMI_AHB_DMA_CONF0_BURST_MODE |
+                       HDMI_AHB_DMA_CONF0_INCR4;
+               if (runtime->channels == 2)
+                       threshold = 126;
+               else
+                       threshold = 124;
+               break;
+       case 0x1a:
+               conf0 = HDMI_AHB_DMA_CONF0_BURST_MODE |
+                       HDMI_AHB_DMA_CONF0_INCR8;
+               threshold = 128;
+               break;
+       default:
+               /* NOTREACHED */
+               return -EINVAL;
+       }
+
+       dw_hdmi_set_sample_rate(dw->data.hdmi, runtime->rate);
+
+       /* Minimum number of bytes in the fifo. */
+       runtime->hw.fifo_size = threshold * 32;
+
+       conf0 |= HDMI_AHB_DMA_CONF0_EN_HLOCK;
+       conf1 = default_hdmi_channel_config[runtime->channels - 2].conf1;
+       ca = default_hdmi_channel_config[runtime->channels - 2].ca;
+
+       /*
+        * For >2 channel PCM audio, we need to select layout 1
+        * and set an appropriate channel map.
+        */
+       if (runtime->channels > 2)
+               layout = HDMI_FC_AUDSCONF_LAYOUT1;
+       else
+               layout = HDMI_FC_AUDSCONF_LAYOUT0;
+
+       writeb_relaxed(threshold, dw->data.base + HDMI_AHB_DMA_THRSLD);
+       writeb_relaxed(conf0, dw->data.base + HDMI_AHB_DMA_CONF0);
+       writeb_relaxed(conf1, dw->data.base + HDMI_AHB_DMA_CONF1);
+       writeb_relaxed(layout, dw->data.base + HDMI_FC_AUDSCONF);
+       writeb_relaxed(ca, dw->data.base + HDMI_FC_AUDICONF2);
+
+       switch (runtime->format) {
+       case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE:
+               dw->reformat = dw_hdmi_reformat_iec958;
+               break;
+       case SNDRV_PCM_FORMAT_S24_LE:
+               dw_hdmi_create_cs(dw, runtime);
+               dw->reformat = dw_hdmi_reformat_s24;
+               break;
+       }
+       dw->iec_offset = 0;
+       dw->channels = runtime->channels;
+       dw->buf_src  = runtime->dma_area;
+       dw->buf_dst  = substream->dma_buffer.area;
+       dw->buf_addr = substream->dma_buffer.addr;
+       dw->buf_period = snd_pcm_lib_period_bytes(substream);
+       dw->buf_size = snd_pcm_lib_buffer_bytes(substream);
+
+       return 0;
+}
+
+static int dw_hdmi_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+       struct snd_dw_hdmi *dw = substream->private_data;
+       unsigned long flags;
+       int ret = 0;
+
+       switch (cmd) {
+       case SNDRV_PCM_TRIGGER_START:
+               spin_lock_irqsave(&dw->lock, flags);
+               dw->buf_offset = 0;
+               dw->substream = substream;
+               dw_hdmi_start_dma(dw);
+               dw_hdmi_audio_enable(dw->data.hdmi);
+               spin_unlock_irqrestore(&dw->lock, flags);
+               substream->runtime->delay = substream->runtime->period_size;
+               break;
+
+       case SNDRV_PCM_TRIGGER_STOP:
+               spin_lock_irqsave(&dw->lock, flags);
+               dw->substream = NULL;
+               dw_hdmi_stop_dma(dw);
+               dw_hdmi_audio_disable(dw->data.hdmi);
+               spin_unlock_irqrestore(&dw->lock, flags);
+               break;
+
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static snd_pcm_uframes_t dw_hdmi_pointer(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct snd_dw_hdmi *dw = substream->private_data;
+
+       /*
+        * We are unable to report the exact hardware position as
+        * reading the 32-bit DMA position using 8-bit reads is racy.
+        */
+       return bytes_to_frames(runtime, dw->buf_offset);
+}
+
+static struct snd_pcm_ops snd_dw_hdmi_ops = {
+       .open = dw_hdmi_open,
+       .close = dw_hdmi_close,
+       .ioctl = snd_pcm_lib_ioctl,
+       .hw_params = dw_hdmi_hw_params,
+       .hw_free = dw_hdmi_hw_free,
+       .prepare = dw_hdmi_prepare,
+       .trigger = dw_hdmi_trigger,
+       .pointer = dw_hdmi_pointer,
+       .page = snd_pcm_lib_get_vmalloc_page,
+};
+
+static int snd_dw_hdmi_probe(struct platform_device *pdev)
+{
+       const struct dw_hdmi_audio_data *data = pdev->dev.platform_data;
+       struct device *dev = pdev->dev.parent;
+       struct snd_dw_hdmi *dw;
+       struct snd_card *card;
+       struct snd_pcm *pcm;
+       unsigned revision;
+       int ret;
+
+       writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL,
+                      data->base + HDMI_IH_MUTE_AHBDMAAUD_STAT0);
+       revision = readb_relaxed(data->base + HDMI_REVISION_ID);
+       if (revision != 0x0a && revision != 0x1a) {
+               dev_err(dev, "dw-hdmi-audio: unknown revision 0x%02x\n",
+                       revision);
+               return -ENXIO;
+       }
+
+       ret = snd_card_new(dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+                             THIS_MODULE, sizeof(struct snd_dw_hdmi), &card);
+       if (ret < 0)
+               return ret;
+
+       strlcpy(card->driver, DRIVER_NAME, sizeof(card->driver));
+       strlcpy(card->shortname, "DW-HDMI", sizeof(card->shortname));
+       snprintf(card->longname, sizeof(card->longname),
+                "%s rev 0x%02x, irq %d", card->shortname, revision,
+                data->irq);
+
+       dw = card->private_data;
+       dw->card = card;
+       dw->data = *data;
+       dw->revision = revision;
+
+       spin_lock_init(&dw->lock);
+
+       ret = snd_pcm_new(card, "DW HDMI", 0, 1, 0, &pcm);
+       if (ret < 0)
+               goto err;
+
+       dw->pcm = pcm;
+       pcm->private_data = dw;
+       strlcpy(pcm->name, DRIVER_NAME, sizeof(pcm->name));
+       snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_dw_hdmi_ops);
+
+       /*
+        * To support 8-channel 96kHz audio reliably, we need 512k
+        * to satisfy alsa with our restricted period (ERR004323).
+        */
+       snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+                       dev, 128 * 1024, 1024 * 1024);
+
+       ret = snd_card_register(card);
+       if (ret < 0)
+               goto err;
+
+       platform_set_drvdata(pdev, dw);
+
+       return 0;
+
+err:
+       snd_card_free(card);
+       return ret;
+}
+
+static int snd_dw_hdmi_remove(struct platform_device *pdev)
+{
+       struct snd_dw_hdmi *dw = platform_get_drvdata(pdev);
+
+       snd_card_free(dw->card);
+
+       return 0;
+}
+
+#if defined(CONFIG_PM_SLEEP) && defined(IS_NOT_BROKEN)
+/*
+ * This code is fine, but requires implementation in the dw_hdmi_trigger()
+ * method which is currently missing as I have no way to test this.
+ */
+static int snd_dw_hdmi_suspend(struct device *dev)
+{
+       struct snd_dw_hdmi *dw = dev_get_drvdata(dev);
+
+       snd_power_change_state(dw->card, SNDRV_CTL_POWER_D3cold);
+       snd_pcm_suspend_all(dw->pcm);
+
+       return 0;
+}
+
+static int snd_dw_hdmi_resume(struct device *dev)
+{
+       struct snd_dw_hdmi *dw = dev_get_drvdata(dev);
+
+       snd_power_change_state(dw->card, SNDRV_CTL_POWER_D0);
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(snd_dw_hdmi_pm, snd_dw_hdmi_suspend,
+                        snd_dw_hdmi_resume);
+#define PM_OPS &snd_dw_hdmi_pm
+#else
+#define PM_OPS NULL
+#endif
+
+static struct platform_driver snd_dw_hdmi_driver = {
+       .probe  = snd_dw_hdmi_probe,
+       .remove = snd_dw_hdmi_remove,
+       .driver = {
+               .name = DRIVER_NAME,
+               .owner = THIS_MODULE,
+               .pm = PM_OPS,
+       },
+};
+
+module_platform_driver(snd_dw_hdmi_driver);
+
+MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_DESCRIPTION("Synopsis Designware HDMI AHB ALSA interface");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/gpu/drm/bridge/dw_hdmi-audio.h b/drivers/gpu/drm/bridge/dw_hdmi-audio.h
new file mode 100644 (file)
index 0000000..91f631b
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef DW_HDMI_AUDIO_H
+#define DW_HDMI_AUDIO_H
+
+struct dw_hdmi;
+
+struct dw_hdmi_audio_data {
+       phys_addr_t phys;
+       void __iomem *base;
+       int irq;
+       struct dw_hdmi *hdmi;
+       u8 *eld;
+};
+
+#endif
index 0083d4e7e7e2792a06a67956858b1d1eaf8f983e..56de9f1c95fcdb7b7c20a7bd16687b5706aa9184 100644 (file)
@@ -28,6 +28,7 @@
 #include <drm/bridge/dw_hdmi.h>
 
 #include "dw_hdmi.h"
+#include "dw_hdmi-audio.h"
 
 #define HDMI_EDID_LEN          512
 
@@ -104,6 +105,7 @@ struct dw_hdmi {
        struct drm_encoder *encoder;
        struct drm_bridge *bridge;
 
+       struct platform_device *audio;
        enum dw_hdmi_devtype dev_type;
        struct device *dev;
        struct clk *isfr_clk;
@@ -126,7 +128,11 @@ struct dw_hdmi {
        bool sink_has_audio;
 
        struct mutex mutex;             /* for state below and previous_mode */
+       enum drm_connector_force force; /* mutex-protected force state */
        bool disabled;                  /* DRM has disabled our bridge */
+       bool bridge_is_on;              /* indicates the bridge is on */
+       bool rxsense;                   /* rxsense state */
+       u8 phy_mask;                    /* desired phy int mask settings */
 
        spinlock_t audio_lock;
        struct mutex audio_mutex;
@@ -134,12 +140,19 @@ struct dw_hdmi {
        unsigned int audio_cts;
        unsigned int audio_n;
        bool audio_enable;
-       int ratio;
 
        void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
        u8 (*read)(struct dw_hdmi *hdmi, int offset);
 };
 
+#define HDMI_IH_PHY_STAT0_RX_SENSE \
+       (HDMI_IH_PHY_STAT0_RX_SENSE0 | HDMI_IH_PHY_STAT0_RX_SENSE1 | \
+        HDMI_IH_PHY_STAT0_RX_SENSE2 | HDMI_IH_PHY_STAT0_RX_SENSE3)
+
+#define HDMI_PHY_RX_SENSE \
+       (HDMI_PHY_RX_SENSE0 | HDMI_PHY_RX_SENSE1 | \
+        HDMI_PHY_RX_SENSE2 | HDMI_PHY_RX_SENSE3)
+
 static void dw_hdmi_writel(struct dw_hdmi *hdmi, u8 val, int offset)
 {
        writel(val, hdmi->regs + (offset << 2));
@@ -203,61 +216,53 @@ static void hdmi_set_cts_n(struct dw_hdmi *hdmi, unsigned int cts,
        hdmi_writeb(hdmi, n & 0xff, HDMI_AUD_N1);
 }
 
-static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk,
-                                  unsigned int ratio)
+static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk)
 {
        unsigned int n = (128 * freq) / 1000;
+       unsigned int mult = 1;
+
+       while (freq > 48000) {
+               mult *= 2;
+               freq /= 2;
+       }
 
        switch (freq) {
        case 32000:
-               if (pixel_clk == 25170000)
-                       n = (ratio == 150) ? 9152 : 4576;
-               else if (pixel_clk == 27020000)
-                       n = (ratio == 150) ? 8192 : 4096;
-               else if (pixel_clk == 74170000 || pixel_clk == 148350000)
+               if (pixel_clk == 25175000)
+                       n = 4576;
+               else if (pixel_clk == 27027000)
+                       n = 4096;
+               else if (pixel_clk == 74176000 || pixel_clk == 148352000)
                        n = 11648;
                else
                        n = 4096;
+               n *= mult;
                break;
 
        case 44100:
-               if (pixel_clk == 25170000)
+               if (pixel_clk == 25175000)
                        n = 7007;
-               else if (pixel_clk == 74170000)
+               else if (pixel_clk == 74176000)
                        n = 17836;
-               else if (pixel_clk == 148350000)
-                       n = (ratio == 150) ? 17836 : 8918;
+               else if (pixel_clk == 148352000)
+                       n = 8918;
                else
                        n = 6272;
+               n *= mult;
                break;
 
        case 48000:
-               if (pixel_clk == 25170000)
-                       n = (ratio == 150) ? 9152 : 6864;
-               else if (pixel_clk == 27020000)
-                       n = (ratio == 150) ? 8192 : 6144;
-               else if (pixel_clk == 74170000)
+               if (pixel_clk == 25175000)
+                       n = 6864;
+               else if (pixel_clk == 27027000)
+                       n = 6144;
+               else if (pixel_clk == 74176000)
                        n = 11648;
-               else if (pixel_clk == 148350000)
-                       n = (ratio == 150) ? 11648 : 5824;
+               else if (pixel_clk == 148352000)
+                       n = 5824;
                else
                        n = 6144;
-               break;
-
-       case 88200:
-               n = hdmi_compute_n(44100, pixel_clk, ratio) * 2;
-               break;
-
-       case 96000:
-               n = hdmi_compute_n(48000, pixel_clk, ratio) * 2;
-               break;
-
-       case 176400:
-               n = hdmi_compute_n(44100, pixel_clk, ratio) * 4;
-               break;
-
-       case 192000:
-               n = hdmi_compute_n(48000, pixel_clk, ratio) * 4;
+               n *= mult;
                break;
 
        default:
@@ -267,93 +272,29 @@ static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk,
        return n;
 }
 
-static unsigned int hdmi_compute_cts(unsigned int freq, unsigned long pixel_clk,
-                                    unsigned int ratio)
-{
-       unsigned int cts = 0;
-
-       pr_debug("%s: freq: %d pixel_clk: %ld ratio: %d\n", __func__, freq,
-                pixel_clk, ratio);
-
-       switch (freq) {
-       case 32000:
-               if (pixel_clk == 297000000) {
-                       cts = 222750;
-                       break;
-               }
-       case 48000:
-       case 96000:
-       case 192000:
-               switch (pixel_clk) {
-               case 25200000:
-               case 27000000:
-               case 54000000:
-               case 74250000:
-               case 148500000:
-                       cts = pixel_clk / 1000;
-                       break;
-               case 297000000:
-                       cts = 247500;
-                       break;
-               /*
-                * All other TMDS clocks are not supported by
-                * DWC_hdmi_tx. The TMDS clocks divided or
-                * multiplied by 1,001 coefficients are not
-                * supported.
-                */
-               default:
-                       break;
-               }
-               break;
-       case 44100:
-       case 88200:
-       case 176400:
-               switch (pixel_clk) {
-               case 25200000:
-                       cts = 28000;
-                       break;
-               case 27000000:
-                       cts = 30000;
-                       break;
-               case 54000000:
-                       cts = 60000;
-                       break;
-               case 74250000:
-                       cts = 82500;
-                       break;
-               case 148500000:
-                       cts = 165000;
-                       break;
-               case 297000000:
-                       cts = 247500;
-                       break;
-               default:
-                       break;
-               }
-               break;
-       default:
-               break;
-       }
-       if (ratio == 100)
-               return cts;
-       return (cts * ratio) / 100;
-}
-
 static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
-       unsigned long pixel_clk, unsigned int sample_rate, unsigned int ratio)
+       unsigned long pixel_clk, unsigned int sample_rate)
 {
+       unsigned long ftdms = pixel_clk;
        unsigned int n, cts;
+       u64 tmp;
 
-       n = hdmi_compute_n(sample_rate, pixel_clk, ratio);
-       cts = hdmi_compute_cts(sample_rate, pixel_clk, ratio);
-       if (!cts) {
-               dev_err(hdmi->dev,
-                       "%s: pixel clock/sample rate not supported: %luMHz / %ukHz\n",
-                       __func__, pixel_clk, sample_rate);
-       }
+       n = hdmi_compute_n(sample_rate, pixel_clk);
+
+       /*
+        * Compute the CTS value from the N value.  Note that CTS and N
+        * can be up to 20 bits in total, so we need 64-bit math.  Also
+        * note that our TDMS clock is not fully accurate; it is accurate
+        * to kHz.  This can introduce an unnecessary remainder in the
+        * calculation below, so we don't try to warn about that.
+        */
+       tmp = (u64)ftdms * n;
+       do_div(tmp, 128 * sample_rate);
+       cts = tmp;
 
-       dev_dbg(hdmi->dev, "%s: samplerate=%ukHz ratio=%d pixelclk=%luMHz N=%d cts=%d\n",
-               __func__, sample_rate, ratio, pixel_clk, n, cts);
+       dev_dbg(hdmi->dev, "%s: fs=%uHz ftdms=%lu.%03luMHz N=%d cts=%d\n",
+               __func__, sample_rate, ftdms / 1000000, (ftdms / 1000) % 1000,
+               n, cts);
 
        spin_lock_irq(&hdmi->audio_lock);
        hdmi->audio_n = n;
@@ -365,8 +306,7 @@ static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
 static void hdmi_init_clk_regenerator(struct dw_hdmi *hdmi)
 {
        mutex_lock(&hdmi->audio_mutex);
-       hdmi_set_clk_regenerator(hdmi, 74250000, hdmi->sample_rate,
-                                hdmi->ratio);
+       hdmi_set_clk_regenerator(hdmi, 74250000, hdmi->sample_rate);
        mutex_unlock(&hdmi->audio_mutex);
 }
 
@@ -374,7 +314,7 @@ static void hdmi_clk_regenerator_update_pixel_clock(struct dw_hdmi *hdmi)
 {
        mutex_lock(&hdmi->audio_mutex);
        hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock,
-                                hdmi->sample_rate, hdmi->ratio);
+                                hdmi->sample_rate);
        mutex_unlock(&hdmi->audio_mutex);
 }
 
@@ -383,7 +323,7 @@ void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate)
        mutex_lock(&hdmi->audio_mutex);
        hdmi->sample_rate = rate;
        hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock,
-                                hdmi->sample_rate, hdmi->ratio);
+                                hdmi->sample_rate);
        mutex_unlock(&hdmi->audio_mutex);
 }
 EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_rate);
@@ -1063,6 +1003,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
        u8 inv_val;
        struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
        int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
+       unsigned int vdisplay;
 
        vmode->mpixelclock = mode->clock * 1000;
 
@@ -1102,13 +1043,29 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
 
        hdmi_writeb(hdmi, inv_val, HDMI_FC_INVIDCONF);
 
+       vdisplay = mode->vdisplay;
+       vblank = mode->vtotal - mode->vdisplay;
+       v_de_vs = mode->vsync_start - mode->vdisplay;
+       vsync_len = mode->vsync_end - mode->vsync_start;
+
+       /*
+        * When we're setting an interlaced mode, we need
+        * to adjust the vertical timing to suit.
+        */
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+               vdisplay /= 2;
+               vblank /= 2;
+               v_de_vs /= 2;
+               vsync_len /= 2;
+       }
+
        /* Set up horizontal active pixel width */
        hdmi_writeb(hdmi, mode->hdisplay >> 8, HDMI_FC_INHACTV1);
        hdmi_writeb(hdmi, mode->hdisplay, HDMI_FC_INHACTV0);
 
        /* Set up vertical active lines */
-       hdmi_writeb(hdmi, mode->vdisplay >> 8, HDMI_FC_INVACTV1);
-       hdmi_writeb(hdmi, mode->vdisplay, HDMI_FC_INVACTV0);
+       hdmi_writeb(hdmi, vdisplay >> 8, HDMI_FC_INVACTV1);
+       hdmi_writeb(hdmi, vdisplay, HDMI_FC_INVACTV0);
 
        /* Set up horizontal blanking pixel region width */
        hblank = mode->htotal - mode->hdisplay;
@@ -1116,7 +1073,6 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
        hdmi_writeb(hdmi, hblank, HDMI_FC_INHBLANK0);
 
        /* Set up vertical blanking pixel region width */
-       vblank = mode->vtotal - mode->vdisplay;
        hdmi_writeb(hdmi, vblank, HDMI_FC_INVBLANK);
 
        /* Set up HSYNC active edge delay width (in pixel clks) */
@@ -1125,7 +1081,6 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
        hdmi_writeb(hdmi, h_de_hs, HDMI_FC_HSYNCINDELAY0);
 
        /* Set up VSYNC active edge delay (in lines) */
-       v_de_vs = mode->vsync_start - mode->vdisplay;
        hdmi_writeb(hdmi, v_de_vs, HDMI_FC_VSYNCINDELAY);
 
        /* Set up HSYNC active pulse width (in pixel clks) */
@@ -1134,7 +1089,6 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
        hdmi_writeb(hdmi, hsync_len, HDMI_FC_HSYNCINWIDTH0);
 
        /* Set up VSYNC active edge delay (in lines) */
-       vsync_len = mode->vsync_end - mode->vsync_start;
        hdmi_writeb(hdmi, vsync_len, HDMI_FC_VSYNCINWIDTH);
 }
 
@@ -1302,10 +1256,11 @@ static int dw_hdmi_fb_registered(struct dw_hdmi *hdmi)
                    HDMI_PHY_I2CM_CTLINT_ADDR);
 
        /* enable cable hot plug irq */
-       hdmi_writeb(hdmi, (u8)~HDMI_PHY_HPD, HDMI_PHY_MASK0);
+       hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
 
        /* Clear Hotplug interrupts */
-       hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD, HDMI_IH_PHY_STAT0);
+       hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE,
+                   HDMI_IH_PHY_STAT0);
 
        return 0;
 }
@@ -1364,12 +1319,61 @@ static void initialize_hdmi_ih_mutes(struct dw_hdmi *hdmi)
 
 static void dw_hdmi_poweron(struct dw_hdmi *hdmi)
 {
+       hdmi->bridge_is_on = true;
        dw_hdmi_setup(hdmi, &hdmi->previous_mode);
 }
 
 static void dw_hdmi_poweroff(struct dw_hdmi *hdmi)
 {
        dw_hdmi_phy_disable(hdmi);
+       hdmi->bridge_is_on = false;
+}
+
+static void dw_hdmi_update_power(struct dw_hdmi *hdmi)
+{
+       int force = hdmi->force;
+
+       if (hdmi->disabled) {
+               force = DRM_FORCE_OFF;
+       } else if (force == DRM_FORCE_UNSPECIFIED) {
+               if (hdmi->rxsense)
+                       force = DRM_FORCE_ON;
+               else
+                       force = DRM_FORCE_OFF;
+       }
+
+       if (force == DRM_FORCE_OFF) {
+               if (hdmi->bridge_is_on)
+                       dw_hdmi_poweroff(hdmi);
+       } else {
+               if (!hdmi->bridge_is_on)
+                       dw_hdmi_poweron(hdmi);
+       }
+}
+
+/*
+ * Adjust the detection of RXSENSE according to whether we have a forced
+ * connection mode enabled, or whether we have been disabled.  There is
+ * no point processing RXSENSE interrupts if we have a forced connection
+ * state, or DRM has us disabled.
+ *
+ * We also disable rxsense interrupts when we think we're disconnected
+ * to avoid floating TDMS signals giving false rxsense interrupts.
+ *
+ * Note: we still need to listen for HPD interrupts even when DRM has us
+ * disabled so that we can detect a connect event.
+ */
+static void dw_hdmi_update_phy_mask(struct dw_hdmi *hdmi)
+{
+       u8 old_mask = hdmi->phy_mask;
+
+       if (hdmi->force || hdmi->disabled || !hdmi->rxsense)
+               hdmi->phy_mask |= HDMI_PHY_RX_SENSE;
+       else
+               hdmi->phy_mask &= ~HDMI_PHY_RX_SENSE;
+
+       if (old_mask != hdmi->phy_mask)
+               hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0);
 }
 
 static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
@@ -1399,7 +1403,8 @@ static void dw_hdmi_bridge_disable(struct drm_bridge *bridge)
 
        mutex_lock(&hdmi->mutex);
        hdmi->disabled = true;
-       dw_hdmi_poweroff(hdmi);
+       dw_hdmi_update_power(hdmi);
+       dw_hdmi_update_phy_mask(hdmi);
        mutex_unlock(&hdmi->mutex);
 }
 
@@ -1408,8 +1413,9 @@ static void dw_hdmi_bridge_enable(struct drm_bridge *bridge)
        struct dw_hdmi *hdmi = bridge->driver_private;
 
        mutex_lock(&hdmi->mutex);
-       dw_hdmi_poweron(hdmi);
        hdmi->disabled = false;
+       dw_hdmi_update_power(hdmi);
+       dw_hdmi_update_phy_mask(hdmi);
        mutex_unlock(&hdmi->mutex);
 }
 
@@ -1424,6 +1430,12 @@ dw_hdmi_connector_detect(struct drm_connector *connector, bool force)
        struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
                                             connector);
 
+       mutex_lock(&hdmi->mutex);
+       hdmi->force = DRM_FORCE_UNSPECIFIED;
+       dw_hdmi_update_power(hdmi);
+       dw_hdmi_update_phy_mask(hdmi);
+       mutex_unlock(&hdmi->mutex);
+
        return hdmi_readb(hdmi, HDMI_PHY_STAT0) & HDMI_PHY_HPD ?
                connector_status_connected : connector_status_disconnected;
 }
@@ -1447,6 +1459,8 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
                hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
                drm_mode_connector_update_edid_property(connector, edid);
                ret = drm_add_edid_modes(connector, edid);
+               /* Store the ELD */
+               drm_edid_to_eld(connector, edid);
                kfree(edid);
        } else {
                dev_dbg(hdmi->dev, "failed to get edid\n");
@@ -1488,11 +1502,24 @@ static void dw_hdmi_connector_destroy(struct drm_connector *connector)
        drm_connector_cleanup(connector);
 }
 
+static void dw_hdmi_connector_force(struct drm_connector *connector)
+{
+       struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
+                                            connector);
+
+       mutex_lock(&hdmi->mutex);
+       hdmi->force = connector->force;
+       dw_hdmi_update_power(hdmi);
+       dw_hdmi_update_phy_mask(hdmi);
+       mutex_unlock(&hdmi->mutex);
+}
+
 static struct drm_connector_funcs dw_hdmi_connector_funcs = {
        .dpms = drm_helper_connector_dpms,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .detect = dw_hdmi_connector_detect,
        .destroy = dw_hdmi_connector_destroy,
+       .force = dw_hdmi_connector_force,
 };
 
 static struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
@@ -1525,33 +1552,69 @@ static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id)
 static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
 {
        struct dw_hdmi *hdmi = dev_id;
-       u8 intr_stat;
-       u8 phy_int_pol;
+       u8 intr_stat, phy_int_pol, phy_pol_mask, phy_stat;
 
        intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
-
        phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0);
+       phy_stat = hdmi_readb(hdmi, HDMI_PHY_STAT0);
+
+       phy_pol_mask = 0;
+       if (intr_stat & HDMI_IH_PHY_STAT0_HPD)
+               phy_pol_mask |= HDMI_PHY_HPD;
+       if (intr_stat & HDMI_IH_PHY_STAT0_RX_SENSE0)
+               phy_pol_mask |= HDMI_PHY_RX_SENSE0;
+       if (intr_stat & HDMI_IH_PHY_STAT0_RX_SENSE1)
+               phy_pol_mask |= HDMI_PHY_RX_SENSE1;
+       if (intr_stat & HDMI_IH_PHY_STAT0_RX_SENSE2)
+               phy_pol_mask |= HDMI_PHY_RX_SENSE2;
+       if (intr_stat & HDMI_IH_PHY_STAT0_RX_SENSE3)
+               phy_pol_mask |= HDMI_PHY_RX_SENSE3;
+
+       if (phy_pol_mask)
+               hdmi_modb(hdmi, ~phy_int_pol, phy_pol_mask, HDMI_PHY_POL0);
 
-       if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
-               hdmi_modb(hdmi, ~phy_int_pol, HDMI_PHY_HPD, HDMI_PHY_POL0);
+       /*
+        * RX sense tells us whether the TDMS transmitters are detecting
+        * load - in other words, there's something listening on the
+        * other end of the link.  Use this to decide whether we should
+        * power on the phy as HPD may be toggled by the sink to merely
+        * ask the source to re-read the EDID.
+        */
+       if (intr_stat &
+           (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) {
                mutex_lock(&hdmi->mutex);
-               if (phy_int_pol & HDMI_PHY_HPD) {
-                       dev_dbg(hdmi->dev, "EVENT=plugin\n");
-
-                       if (!hdmi->disabled)
-                               dw_hdmi_poweron(hdmi);
-               } else {
-                       dev_dbg(hdmi->dev, "EVENT=plugout\n");
-
-                       if (!hdmi->disabled)
-                               dw_hdmi_poweroff(hdmi);
+               if (!hdmi->disabled && !hdmi->force) {
+                       /*
+                        * If the RX sense status indicates we're disconnected,
+                        * clear the software rxsense status.
+                        */
+                       if (!(phy_stat & HDMI_PHY_RX_SENSE))
+                               hdmi->rxsense = false;
+
+                       /*
+                        * Only set the software rxsense status when both
+                        * rxsense and hpd indicates we're connected.
+                        * This avoids what seems to be bad behaviour in
+                        * at least iMX6S versions of the phy.
+                        */
+                       if (phy_stat & HDMI_PHY_HPD)
+                               hdmi->rxsense = true;
+
+                       dw_hdmi_update_power(hdmi);
+                       dw_hdmi_update_phy_mask(hdmi);
                }
                mutex_unlock(&hdmi->mutex);
+       }
+
+       if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
+               dev_dbg(hdmi->dev, "EVENT=%s\n",
+                       phy_int_pol & HDMI_PHY_HPD ? "plugin" : "plugout");
                drm_helper_hpd_irq_event(hdmi->bridge->dev);
        }
 
        hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0);
-       hdmi_writeb(hdmi, ~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0);
+       hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE),
+                   HDMI_IH_MUTE_PHY_STAT0);
 
        return IRQ_HANDLED;
 }
@@ -1599,7 +1662,9 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
 {
        struct drm_device *drm = data;
        struct device_node *np = dev->of_node;
+       struct platform_device_info pdevinfo;
        struct device_node *ddc_node;
+       struct dw_hdmi_audio_data audio;
        struct dw_hdmi *hdmi;
        int ret;
        u32 val = 1;
@@ -1608,13 +1673,16 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
        if (!hdmi)
                return -ENOMEM;
 
+       hdmi->connector.interlace_allowed = 1;
+
        hdmi->plat_data = plat_data;
        hdmi->dev = dev;
        hdmi->dev_type = plat_data->dev_type;
        hdmi->sample_rate = 48000;
-       hdmi->ratio = 100;
        hdmi->encoder = encoder;
        hdmi->disabled = true;
+       hdmi->rxsense = true;
+       hdmi->phy_mask = (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE);
 
        mutex_init(&hdmi->mutex);
        mutex_init(&hdmi->audio_mutex);
@@ -1705,10 +1773,11 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
         * Configure registers related to HDMI interrupt
         * generation before registering IRQ.
         */
-       hdmi_writeb(hdmi, HDMI_PHY_HPD, HDMI_PHY_POL0);
+       hdmi_writeb(hdmi, HDMI_PHY_HPD | HDMI_PHY_RX_SENSE, HDMI_PHY_POL0);
 
        /* Clear Hotplug interrupts */
-       hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD, HDMI_IH_PHY_STAT0);
+       hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE,
+                   HDMI_IH_PHY_STAT0);
 
        ret = dw_hdmi_fb_registered(hdmi);
        if (ret)
@@ -1719,7 +1788,26 @@ int dw_hdmi_bind(struct device *dev, struct device *master,
                goto err_iahb;
 
        /* Unmute interrupts */
-       hdmi_writeb(hdmi, ~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0);
+       hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE),
+                   HDMI_IH_MUTE_PHY_STAT0);
+
+       memset(&pdevinfo, 0, sizeof(pdevinfo));
+       pdevinfo.parent = dev;
+       pdevinfo.id = PLATFORM_DEVID_AUTO;
+
+       if (hdmi_readb(hdmi, HDMI_CONFIG1_ID) & HDMI_CONFIG1_AHB) {
+               audio.phys = iores->start;
+               audio.base = hdmi->regs;
+               audio.irq = irq;
+               audio.hdmi = hdmi;
+               audio.eld = hdmi->connector.eld;
+
+               pdevinfo.name = "dw-hdmi-ahb-audio";
+               pdevinfo.data = &audio;
+               pdevinfo.size_data = sizeof(audio);
+               pdevinfo.dma_mask = DMA_BIT_MASK(32);
+               hdmi->audio = platform_device_register_full(&pdevinfo);
+       }
 
        dev_set_drvdata(dev, hdmi);
 
@@ -1738,6 +1826,9 @@ void dw_hdmi_unbind(struct device *dev, struct device *master, void *data)
 {
        struct dw_hdmi *hdmi = dev_get_drvdata(dev);
 
+       if (hdmi->audio && !IS_ERR(hdmi->audio))
+               platform_device_unregister(hdmi->audio);
+
        /* Disable all interrupts */
        hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
 
index ee7f7ed2ab12222c2c71cc2fa6709d181c77a97f..fc9a560429d6efe94a83c93162c68373e8d6b90a 100644 (file)
 #define HDMI_I2CM_FS_SCL_LCNT_0_ADDR            0x7E12
 
 enum {
+/* CONFIG1_ID field values */
+       HDMI_CONFIG1_AHB = 0x01,
+
 /* IH_FC_INT2 field values */
        HDMI_IH_FC_INT2_OVERFLOW_MASK = 0x03,
        HDMI_IH_FC_INT2_LOW_PRIORITY_OVERFLOW = 0x02,
index 4b2b4aa5033ba1d6988f4f2c7dfc1ae605d82da5..a10ea6aec6291f4c233a0b1d49a12fcbace37f17 100644 (file)
@@ -36,8 +36,6 @@
 #include <linux/slab.h>
 #include "drm_legacy.h"
 
-#if __OS_HAS_AGP
-
 #include <asm/agp.h>
 
 /**
@@ -502,5 +500,3 @@ drm_agp_bind_pages(struct drm_device *dev,
        return mem;
 }
 EXPORT_SYMBOL(drm_agp_bind_pages);
-
-#endif /* __OS_HAS_AGP */
index f7d5166f89b24ef740e854175927ad934652fed4..7bb3845d997492d5aa60092e7441f445d74e6104 100644 (file)
@@ -438,7 +438,8 @@ EXPORT_SYMBOL(drm_atomic_crtc_set_property);
  * consistent behavior you must call this function rather than the
  * driver hook directly.
  */
-int drm_atomic_crtc_get_property(struct drm_crtc *crtc,
+static int
+drm_atomic_crtc_get_property(struct drm_crtc *crtc,
                const struct drm_crtc_state *state,
                struct drm_property *property, uint64_t *val)
 {
@@ -663,6 +664,25 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
        return 0;
 }
 
+static bool
+plane_switching_crtc(struct drm_atomic_state *state,
+                    struct drm_plane *plane,
+                    struct drm_plane_state *plane_state)
+{
+       if (!plane->state->crtc || !plane_state->crtc)
+               return false;
+
+       if (plane->state->crtc == plane_state->crtc)
+               return false;
+
+       /* This could be refined, but currently there's no helper or driver code
+        * to implement direct switching of active planes nor userspace to take
+        * advantage of more direct plane switching without the intermediate
+        * full OFF state.
+        */
+       return true;
+}
+
 /**
  * drm_atomic_plane_check - check plane state
  * @plane: plane to check
@@ -734,6 +754,12 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
                return -ENOSPC;
        }
 
+       if (plane_switching_crtc(state->state, plane, state)) {
+               DRM_DEBUG_ATOMIC("[PLANE:%d] switching CRTC directly\n",
+                                plane->base.id);
+               return -EINVAL;
+       }
+
        return 0;
 }
 
index aecb5d69bc2dc1169a3b3e1088218f6fc0412145..87a2a446d2b759c6b1b8418187f5288003aff2d3 100644 (file)
  * add their own additional internal state.
  *
  * This library also provides default implementations for the check callback in
- * drm_atomic_helper_check and for the commit callback with
- * drm_atomic_helper_commit. But the individual stages and callbacks are expose
- * to allow drivers to mix and match and e.g. use the plane helpers only
+ * drm_atomic_helper_check() and for the commit callback with
+ * drm_atomic_helper_commit(). But the individual stages and callbacks are
+ * exposed to allow drivers to mix and match and e.g. use the plane helpers only
  * together with a driver private modeset implementation.
  *
  * This library also provides implementations for all the legacy driver
- * interfaces on top of the atomic interface. See drm_atomic_helper_set_config,
- * drm_atomic_helper_disable_plane, drm_atomic_helper_disable_plane and the
+ * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
+ * drm_atomic_helper_disable_plane(), drm_atomic_helper_disable_plane() and the
  * various functions to implement set_property callbacks. New drivers must not
  * implement these functions themselves but must use the provided helpers.
  */
@@ -993,6 +993,22 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
  * object. This can still fail when e.g. the framebuffer reservation fails. For
  * now this doesn't implement asynchronous commits.
  *
+ * Note that right now this function does not support async commits, and hence
+ * driver writers must implement their own version for now. Also note that the
+ * default ordering of how the various stages are called is to match the legacy
+ * modeset helper library closest. One peculiarity of that is that it doesn't
+ * mesh well with runtime PM at all.
+ *
+ * For drivers supporting runtime PM the recommended sequence is
+ *
+ *     drm_atomic_helper_commit_modeset_disables(dev, state);
+ *
+ *     drm_atomic_helper_commit_modeset_enables(dev, state);
+ *
+ *     drm_atomic_helper_commit_planes(dev, state, true);
+ *
+ * See the kerneldoc entries for these three functions for more details.
+ *
  * RETURNS
  * Zero for success or -errno.
  */
@@ -1037,7 +1053,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
 
        drm_atomic_helper_commit_modeset_disables(dev, state);
 
-       drm_atomic_helper_commit_planes(dev, state);
+       drm_atomic_helper_commit_planes(dev, state, false);
 
        drm_atomic_helper_commit_modeset_enables(dev, state);
 
@@ -1077,7 +1093,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
  * work item, which allows nice concurrent updates on disjoint sets of crtcs.
  *
  * 3. The software state is updated synchronously with
- * drm_atomic_helper_swap_state. Doing this under the protection of all modeset
+ * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
  * locks means concurrent callers never see inconsistent state. And doing this
  * while it's guaranteed that no relevant async worker runs means that async
  * workers do not need grab any locks. Actually they must not grab locks, for
@@ -1111,17 +1127,14 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
                const struct drm_plane_helper_funcs *funcs;
                struct drm_plane *plane = state->planes[i];
                struct drm_plane_state *plane_state = state->plane_states[i];
-               struct drm_framebuffer *fb;
 
                if (!plane)
                        continue;
 
                funcs = plane->helper_private;
 
-               fb = plane_state->fb;
-
-               if (fb && funcs->prepare_fb) {
-                       ret = funcs->prepare_fb(plane, fb, plane_state);
+               if (funcs->prepare_fb) {
+                       ret = funcs->prepare_fb(plane, plane_state);
                        if (ret)
                                goto fail;
                }
@@ -1134,17 +1147,14 @@ fail:
                const struct drm_plane_helper_funcs *funcs;
                struct drm_plane *plane = state->planes[i];
                struct drm_plane_state *plane_state = state->plane_states[i];
-               struct drm_framebuffer *fb;
 
                if (!plane)
                        continue;
 
                funcs = plane->helper_private;
 
-               fb = state->plane_states[i]->fb;
-
-               if (fb && funcs->cleanup_fb)
-                       funcs->cleanup_fb(plane, fb, plane_state);
+               if (funcs->cleanup_fb)
+                       funcs->cleanup_fb(plane, plane_state);
 
        }
 
@@ -1152,10 +1162,16 @@ fail:
 }
 EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
 
+bool plane_crtc_active(struct drm_plane_state *state)
+{
+       return state->crtc && state->crtc->state->active;
+}
+
 /**
  * drm_atomic_helper_commit_planes - commit plane state
  * @dev: DRM device
  * @old_state: atomic state object with old state structures
+ * @active_only: Only commit on active CRTC if set
  *
  * This function commits the new plane state using the plane and atomic helper
  * functions for planes and crtcs. It assumes that the atomic state has already
@@ -1168,9 +1184,26 @@ EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
  * Note that this function does all plane updates across all CRTCs in one step.
  * If the hardware can't support this approach look at
  * drm_atomic_helper_commit_planes_on_crtc() instead.
+ *
+ * Plane parameters can be updated by applications while the associated CRTC is
+ * disabled. The DRM/KMS core will store the parameters in the plane state,
+ * which will be available to the driver when the CRTC is turned on. As a result
+ * most drivers don't need to be immediately notified of plane updates for a
+ * disabled CRTC.
+ *
+ * Unless otherwise needed, drivers are advised to set the @active_only
+ * parameters to true in order not to receive plane update notifications related
+ * to a disabled CRTC. This avoids the need to manually ignore plane updates in
+ * driver code when the driver and/or hardware can't or just don't need to deal
+ * with updates on disabled CRTCs, for example when supporting runtime PM.
+ *
+ * The drm_atomic_helper_commit() default implementation only sets @active_only
+ * to false to most closely match the behaviour of the legacy helpers. This should
+ * not be copied blindly by drivers.
  */
 void drm_atomic_helper_commit_planes(struct drm_device *dev,
-                                    struct drm_atomic_state *old_state)
+                                    struct drm_atomic_state *old_state,
+                                    bool active_only)
 {
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state;
@@ -1186,25 +1219,43 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
                if (!funcs || !funcs->atomic_begin)
                        continue;
 
+               if (active_only && !crtc->state->active)
+                       continue;
+
                funcs->atomic_begin(crtc, old_crtc_state);
        }
 
        for_each_plane_in_state(old_state, plane, old_plane_state, i) {
                const struct drm_plane_helper_funcs *funcs;
+               bool disabling;
 
                funcs = plane->helper_private;
 
                if (!funcs)
                        continue;
 
+               disabling = drm_atomic_plane_disabling(plane, old_plane_state);
+
+               if (active_only) {
+                       /*
+                        * Skip planes related to inactive CRTCs. If the plane
+                        * is enabled use the state of the current CRTC. If the
+                        * plane is being disabled use the state of the old
+                        * CRTC to avoid skipping planes being disabled on an
+                        * active CRTC.
+                        */
+                       if (!disabling && !plane_crtc_active(plane->state))
+                               continue;
+                       if (disabling && !plane_crtc_active(old_plane_state))
+                               continue;
+               }
+
                /*
                 * Special-case disabling the plane if drivers support it.
                 */
-               if (drm_atomic_plane_disabling(plane, old_plane_state) &&
-                   funcs->atomic_disable)
+               if (disabling && funcs->atomic_disable)
                        funcs->atomic_disable(plane, old_plane_state);
-               else if (plane->state->crtc ||
-                        drm_atomic_plane_disabling(plane, old_plane_state))
+               else if (plane->state->crtc || disabling)
                        funcs->atomic_update(plane, old_plane_state);
        }
 
@@ -1216,6 +1267,9 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
                if (!funcs || !funcs->atomic_flush)
                        continue;
 
+               if (active_only && !crtc->state->active)
+                       continue;
+
                funcs->atomic_flush(crtc, old_crtc_state);
        }
 }
@@ -1300,14 +1354,11 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
 
        for_each_plane_in_state(old_state, plane, plane_state, i) {
                const struct drm_plane_helper_funcs *funcs;
-               struct drm_framebuffer *old_fb;
 
                funcs = plane->helper_private;
 
-               old_fb = plane_state->fb;
-
-               if (old_fb && funcs->cleanup_fb)
-                       funcs->cleanup_fb(plane, old_fb, plane_state);
+               if (funcs->cleanup_fb)
+                       funcs->cleanup_fb(plane, plane_state);
        }
 }
 EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
@@ -1334,7 +1385,7 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
  *
  * 4. Actually commit the hardware state.
  *
- * 5. Call drm_atomic_helper_cleanup_planes with @state, which since step 3
+ * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
  * contains the old state. Also do any other cleanup required with that state.
  */
 void drm_atomic_helper_swap_state(struct drm_device *dev,
@@ -1502,21 +1553,9 @@ retry:
                goto fail;
        }
 
-       ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+       ret = __drm_atomic_helper_disable_plane(plane, plane_state);
        if (ret != 0)
                goto fail;
-       drm_atomic_set_fb_for_plane(plane_state, NULL);
-       plane_state->crtc_x = 0;
-       plane_state->crtc_y = 0;
-       plane_state->crtc_h = 0;
-       plane_state->crtc_w = 0;
-       plane_state->src_x = 0;
-       plane_state->src_y = 0;
-       plane_state->src_h = 0;
-       plane_state->src_w = 0;
-
-       if (plane == plane->crtc->cursor)
-               state->legacy_cursor_update = true;
 
        ret = drm_atomic_commit(state);
        if (ret != 0)
@@ -1546,6 +1585,32 @@ backoff:
 }
 EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
 
+/* just used from fb-helper and atomic-helper: */
+int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
+               struct drm_plane_state *plane_state)
+{
+       int ret;
+
+       ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+       if (ret != 0)
+               return ret;
+
+       drm_atomic_set_fb_for_plane(plane_state, NULL);
+       plane_state->crtc_x = 0;
+       plane_state->crtc_y = 0;
+       plane_state->crtc_h = 0;
+       plane_state->crtc_w = 0;
+       plane_state->src_x = 0;
+       plane_state->src_y = 0;
+       plane_state->src_h = 0;
+       plane_state->src_w = 0;
+
+       if (plane->crtc && (plane == plane->crtc->cursor))
+               plane_state->state->legacy_cursor_update = true;
+
+       return 0;
+}
+
 static int update_output_state(struct drm_atomic_state *state,
                               struct drm_mode_set *set)
 {
@@ -1629,8 +1694,6 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set)
 {
        struct drm_atomic_state *state;
        struct drm_crtc *crtc = set->crtc;
-       struct drm_crtc_state *crtc_state;
-       struct drm_plane_state *primary_state;
        int ret = 0;
 
        state = drm_atomic_state_alloc(crtc->dev);
@@ -1639,17 +1702,54 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set)
 
        state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
 retry:
-       crtc_state = drm_atomic_get_crtc_state(state, crtc);
-       if (IS_ERR(crtc_state)) {
-               ret = PTR_ERR(crtc_state);
+       ret = __drm_atomic_helper_set_config(set, state);
+       if (ret != 0)
                goto fail;
-       }
 
-       primary_state = drm_atomic_get_plane_state(state, crtc->primary);
-       if (IS_ERR(primary_state)) {
-               ret = PTR_ERR(primary_state);
+       ret = drm_atomic_commit(state);
+       if (ret != 0)
                goto fail;
-       }
+
+       /* Driver takes ownership of state on successful commit. */
+       return 0;
+fail:
+       if (ret == -EDEADLK)
+               goto backoff;
+
+       drm_atomic_state_free(state);
+
+       return ret;
+backoff:
+       drm_atomic_state_clear(state);
+       drm_atomic_legacy_backoff(state);
+
+       /*
+        * Someone might have exchanged the framebuffer while we dropped locks
+        * in the backoff code. We need to fix up the fb refcount tracking the
+        * core does for us.
+        */
+       crtc->primary->old_fb = crtc->primary->fb;
+
+       goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_set_config);
+
+/* just used from fb-helper and atomic-helper: */
+int __drm_atomic_helper_set_config(struct drm_mode_set *set,
+               struct drm_atomic_state *state)
+{
+       struct drm_crtc_state *crtc_state;
+       struct drm_plane_state *primary_state;
+       struct drm_crtc *crtc = set->crtc;
+       int ret;
+
+       crtc_state = drm_atomic_get_crtc_state(state, crtc);
+       if (IS_ERR(crtc_state))
+               return PTR_ERR(crtc_state);
+
+       primary_state = drm_atomic_get_plane_state(state, crtc->primary);
+       if (IS_ERR(primary_state))
+               return PTR_ERR(primary_state);
 
        if (!set->mode) {
                WARN_ON(set->fb);
@@ -1657,13 +1757,13 @@ retry:
 
                ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
                if (ret != 0)
-                       goto fail;
+                       return ret;
 
                crtc_state->active = false;
 
                ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
                if (ret != 0)
-                       goto fail;
+                       return ret;
 
                drm_atomic_set_fb_for_plane(primary_state, NULL);
 
@@ -1675,13 +1775,14 @@ retry:
 
        ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode);
        if (ret != 0)
-               goto fail;
+               return ret;
 
        crtc_state->active = true;
 
        ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
        if (ret != 0)
-               goto fail;
+               return ret;
+
        drm_atomic_set_fb_for_plane(primary_state, set->fb);
        primary_state->crtc_x = 0;
        primary_state->crtc_y = 0;
@@ -1695,35 +1796,10 @@ retry:
 commit:
        ret = update_output_state(state, set);
        if (ret)
-               goto fail;
-
-       ret = drm_atomic_commit(state);
-       if (ret != 0)
-               goto fail;
+               return ret;
 
-       /* Driver takes ownership of state on successful commit. */
        return 0;
-fail:
-       if (ret == -EDEADLK)
-               goto backoff;
-
-       drm_atomic_state_free(state);
-
-       return ret;
-backoff:
-       drm_atomic_state_clear(state);
-       drm_atomic_legacy_backoff(state);
-
-       /*
-        * Someone might have exchanged the framebuffer while we dropped locks
-        * in the backoff code. We need to fix up the fb refcount tracking the
-        * core does for us.
-        */
-       crtc->primary->old_fb = crtc->primary->fb;
-
-       goto retry;
 }
-EXPORT_SYMBOL(drm_atomic_helper_set_config);
 
 /**
  * drm_atomic_helper_crtc_set_property - helper for crtc properties
@@ -2332,6 +2408,84 @@ drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
 }
 EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
 
+/**
+ * drm_atomic_helper_duplicate_state - duplicate an atomic state object
+ * @dev: DRM device
+ * @ctx: lock acquisition context
+ *
+ * Makes a copy of the current atomic state by looping over all objects and
+ * duplicating their respective states.
+ *
+ * Note that this treats atomic state as persistent between save and restore.
+ * Drivers must make sure that this is possible and won't result in confusion
+ * or erroneous behaviour.
+ *
+ * Note that if callers haven't already acquired all modeset locks this might
+ * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
+ *
+ * Returns:
+ * A pointer to the copy of the atomic state object on success or an
+ * ERR_PTR()-encoded error code on failure.
+ */
+struct drm_atomic_state *
+drm_atomic_helper_duplicate_state(struct drm_device *dev,
+                                 struct drm_modeset_acquire_ctx *ctx)
+{
+       struct drm_atomic_state *state;
+       struct drm_connector *conn;
+       struct drm_plane *plane;
+       struct drm_crtc *crtc;
+       int err = 0;
+
+       state = drm_atomic_state_alloc(dev);
+       if (!state)
+               return ERR_PTR(-ENOMEM);
+
+       state->acquire_ctx = ctx;
+
+       drm_for_each_crtc(crtc, dev) {
+               struct drm_crtc_state *crtc_state;
+
+               crtc_state = drm_atomic_get_crtc_state(state, crtc);
+               if (IS_ERR(crtc_state)) {
+                       err = PTR_ERR(crtc_state);
+                       goto free;
+               }
+       }
+
+       drm_for_each_plane(plane, dev) {
+               struct drm_plane_state *plane_state;
+
+               plane_state = drm_atomic_get_plane_state(state, plane);
+               if (IS_ERR(plane_state)) {
+                       err = PTR_ERR(plane_state);
+                       goto free;
+               }
+       }
+
+       drm_for_each_connector(conn, dev) {
+               struct drm_connector_state *conn_state;
+
+               conn_state = drm_atomic_get_connector_state(state, conn);
+               if (IS_ERR(conn_state)) {
+                       err = PTR_ERR(conn_state);
+                       goto free;
+               }
+       }
+
+       /* clear the acquire context so that it isn't accidentally reused */
+       state->acquire_ctx = NULL;
+
+free:
+       if (err < 0) {
+               drm_atomic_state_free(state);
+               state = ERR_PTR(err);
+       }
+
+       return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
+
 /**
  * __drm_atomic_helper_connector_destroy_state - release connector state
  * @connector: connector object
index 569064a00693eb2932186d72eddfda8b2aa74d5c..f1a204d253cce0bfb2616cd4a9b0ebb400e5718c 100644 (file)
@@ -582,7 +582,7 @@ static void drm_cleanup_buf_error(struct drm_device * dev,
        }
 }
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
 /**
  * Add AGP buffers for DMA transfers.
  *
@@ -756,7 +756,7 @@ int drm_legacy_addbufs_agp(struct drm_device *dev,
        return 0;
 }
 EXPORT_SYMBOL(drm_legacy_addbufs_agp);
-#endif                         /* __OS_HAS_AGP */
+#endif /* CONFIG_AGP */
 
 int drm_legacy_addbufs_pci(struct drm_device *dev,
                           struct drm_buf_desc *request)
@@ -1145,7 +1145,7 @@ int drm_legacy_addbufs(struct drm_device *dev, void *data,
        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (request->flags & _DRM_AGP_BUFFER)
                ret = drm_legacy_addbufs_agp(dev, request);
        else
index 33d877c65ced6a3c138af9ab47c824038410be0b..e7c842289568e48c6bfab801e75c2296a0b8b824 100644 (file)
@@ -538,7 +538,12 @@ EXPORT_SYMBOL(drm_framebuffer_reference);
  */
 void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
 {
-       struct drm_device *dev = fb->dev;
+       struct drm_device *dev;
+
+       if (!fb)
+               return;
+
+       dev = fb->dev;
 
        mutex_lock(&dev->mode_config.fb_lock);
        /* Mark fb as reaped and drop idr ref. */
@@ -589,12 +594,17 @@ EXPORT_SYMBOL(drm_framebuffer_cleanup);
  */
 void drm_framebuffer_remove(struct drm_framebuffer *fb)
 {
-       struct drm_device *dev = fb->dev;
+       struct drm_device *dev;
        struct drm_crtc *crtc;
        struct drm_plane *plane;
        struct drm_mode_set set;
        int ret;
 
+       if (!fb)
+               return;
+
+       dev = fb->dev;
+
        WARN_ON(!list_empty(&fb->filp_head));
 
        /*
@@ -1509,7 +1519,7 @@ EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
  */
 int drm_mode_create_tv_properties(struct drm_device *dev,
                                  unsigned int num_modes,
-                                 char *modes[])
+                                 const char * const modes[])
 {
        struct drm_property *tv_selector;
        struct drm_property *tv_subconnector;
@@ -3310,14 +3320,11 @@ int drm_mode_rmfb(struct drm_device *dev,
        if (!found)
                goto fail_lookup;
 
-       /* Mark fb as reaped, we still have a ref from fpriv->fbs. */
-       __drm_framebuffer_unregister(dev, fb);
-
        list_del_init(&fb->filp_head);
        mutex_unlock(&dev->mode_config.fb_lock);
        mutex_unlock(&file_priv->fbs_lock);
 
-       drm_framebuffer_remove(fb);
+       drm_framebuffer_unreference(fb);
 
        return 0;
 
@@ -3484,7 +3491,6 @@ out_err1:
  */
 void drm_fb_release(struct drm_file *priv)
 {
-       struct drm_device *dev = priv->minor->dev;
        struct drm_framebuffer *fb, *tfb;
 
        /*
@@ -3498,16 +3504,10 @@ void drm_fb_release(struct drm_file *priv)
         * at it any more.
         */
        list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
-
-               mutex_lock(&dev->mode_config.fb_lock);
-               /* Mark fb as reaped, we still have a ref from fpriv->fbs. */
-               __drm_framebuffer_unregister(dev, fb);
-               mutex_unlock(&dev->mode_config.fb_lock);
-
                list_del_init(&fb->filp_head);
 
-               /* This will also drop the fpriv->fbs reference. */
-               drm_framebuffer_remove(fb);
+               /* This drops the fpriv->fbs reference. */
+               drm_framebuffer_unreference(fb);
        }
 }
 
@@ -5629,7 +5629,8 @@ unsigned int drm_rotation_simplify(unsigned int rotation,
 {
        if (rotation & ~supported_rotations) {
                rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y);
-               rotation = (rotation & ~0xf) | BIT((ffs(rotation & 0xf) + 1) % 4);
+               rotation = (rotation & DRM_REFLECT_MASK) |
+                          BIT((ffs(rotation & DRM_ROTATE_MASK) + 1) % 4);
        }
 
        return rotation;
@@ -5732,7 +5733,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
         */
        WARN_ON(!list_empty(&dev->mode_config.fb_list));
        list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
-               drm_framebuffer_remove(fb);
+               drm_framebuffer_free(&fb->refcount);
        }
 
        list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
index 291734e87fca7457da9eb3ec8e0ba80f262bd232..9535c5b60387281a8a95929c09201dd086fff9cc 100644 (file)
@@ -424,6 +424,19 @@ static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
               I2C_FUNC_10BIT_ADDR;
 }
 
+static void drm_dp_i2c_msg_write_status_update(struct drm_dp_aux_msg *msg)
+{
+       /*
+        * In case of i2c defer or short i2c ack reply to a write,
+        * we need to switch to WRITE_STATUS_UPDATE to drain the
+        * rest of the message
+        */
+       if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_I2C_WRITE) {
+               msg->request &= DP_AUX_I2C_MOT;
+               msg->request |= DP_AUX_I2C_WRITE_STATUS_UPDATE;
+       }
+}
+
 #define AUX_PRECHARGE_LEN 10 /* 10 to 16 */
 #define AUX_SYNC_LEN (16 + 4) /* preamble + AUX_SYNC_END */
 #define AUX_STOP_LEN 4
@@ -579,6 +592,8 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
                         * Both native ACK and I2C ACK replies received. We
                         * can assume the transfer was successful.
                         */
+                       if (ret != msg->size)
+                               drm_dp_i2c_msg_write_status_update(msg);
                        return ret;
 
                case DP_AUX_I2C_REPLY_NACK:
@@ -596,6 +611,8 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
                        if (defer_i2c < 7)
                                defer_i2c++;
                        usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
+                       drm_dp_i2c_msg_write_status_update(msg);
+
                        continue;
 
                default:
@@ -608,6 +625,14 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
        return -EREMOTEIO;
 }
 
+static void drm_dp_i2c_msg_set_request(struct drm_dp_aux_msg *msg,
+                                      const struct i2c_msg *i2c_msg)
+{
+       msg->request = (i2c_msg->flags & I2C_M_RD) ?
+               DP_AUX_I2C_READ : DP_AUX_I2C_WRITE;
+       msg->request |= DP_AUX_I2C_MOT;
+}
+
 /*
  * Keep retrying drm_dp_i2c_do_msg until all data has been transferred.
  *
@@ -661,10 +686,7 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
 
        for (i = 0; i < num; i++) {
                msg.address = msgs[i].addr;
-               msg.request = (msgs[i].flags & I2C_M_RD) ?
-                       DP_AUX_I2C_READ :
-                       DP_AUX_I2C_WRITE;
-               msg.request |= DP_AUX_I2C_MOT;
+               drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
                /* Send a bare address packet to start the transaction.
                 * Zero sized messages specify an address only (bare
                 * address) transaction.
@@ -672,6 +694,13 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
                msg.buffer = NULL;
                msg.size = 0;
                err = drm_dp_i2c_do_msg(aux, &msg);
+
+               /*
+                * Reset msg.request in case it got
+                * changed into a WRITE_STATUS_UPDATE.
+                */
+               drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
+
                if (err < 0)
                        break;
                /* We want each transaction to be as large as possible, but
@@ -684,6 +713,13 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
                        msg.size = min(transfer_size, msgs[i].len - j);
 
                        err = drm_dp_i2c_drain_msg(aux, &msg);
+
+                       /*
+                * Reset msg.request in case it got
+                        * changed into a WRITE_STATUS_UPDATE.
+                        */
+                       drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
+
                        if (err < 0)
                                break;
                        transfer_size = err;
index 53d09a19f7e13cb8c138f97da613df42e7170491..d01f8d6c5fdb0297aa13371716c6f1f2ceea1ba5 100644 (file)
@@ -55,7 +55,6 @@ module_param_named(debug, drm_debug, int, 0600);
 static DEFINE_SPINLOCK(drm_minor_lock);
 static struct idr drm_minors_idr;
 
-struct class *drm_class;
 static struct dentry *drm_debugfs_root;
 
 void drm_err(const char *format, ...)
@@ -397,16 +396,52 @@ void drm_minor_release(struct drm_minor *minor)
        drm_dev_unref(minor->dev);
 }
 
+/**
+ * DOC: driver instance overview
+ *
+ * A device instance for a drm driver is represented by struct &drm_device. This
+ * is allocated with drm_dev_alloc(), usually from bus-specific ->probe()
+ * callbacks implemented by the driver. The driver then needs to initialize all
+ * the various subsystems for the drm device like memory management, vblank
+ * handling, modesetting support and initial output configuration plus obviously
+ * initialize all the corresponding hardware bits. An important part of this is
+ * also calling drm_dev_set_unique() to set the userspace-visible unique name of
+ * this device instance. Finally when everything is up and running and ready for
+ * userspace the device instance can be published using drm_dev_register().
+ *
+ * There is also deprecated support for initializing device instances using
+ * bus-specific helpers and the ->load() callback. But due to
+ * backwards-compatibility needs the device instance has to be published too
+ * early, which requires unpretty global locking to make safe and is therefore
+ * only supported for existing drivers not yet converted to the new scheme.
+ *
+ * When cleaning up a device instance everything needs to be done in reverse:
+ * First unpublish the device instance with drm_dev_unregister(). Then clean up
+ * any other resources allocated at device initialization and drop the driver's
+ * reference to &drm_device using drm_dev_unref().
+ *
+ * Note that the lifetime rules for &drm_device instances still have a lot of
+ * historical baggage. Hence use the reference counting provided by
+ * drm_dev_ref() and drm_dev_unref() only carefully.
+ *
+ * Also note that embedding of &drm_device is currently not (yet) supported (but
+ * it would be easy to add). Drivers can store driver-private data in the
+ * dev_priv field of &drm_device.
+ */
+
 /**
  * drm_put_dev - Unregister and release a DRM device
  * @dev: DRM device
  *
  * Called at module unload time or when a PCI device is unplugged.
  *
- * Use of this function is discouraged. It will eventually go away completely.
- * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
- *
  * Cleans up all DRM device, calling drm_lastclose().
+ *
+ * Note: Use of this function is deprecated. It will eventually go away
+ * completely.  Please use drm_dev_unregister() and drm_dev_unref() explicitly
+ * instead to make sure that the device isn't userspace accessible any more
+ * while teardown is in progress, ensuring that userspace can't access an
+ * inconsistent state.
  */
 void drm_put_dev(struct drm_device *dev)
 {
@@ -519,7 +554,9 @@ static void drm_fs_inode_free(struct inode *inode)
  *
  * Allocate and initialize a new DRM device. No device registration is done.
  * Call drm_dev_register() to advertise the device to user space and register it
- * with other core subsystems.
+ * with other core subsystems. This should be done last in the device
+ * initialization sequence to make sure userspace can't access an inconsistent
+ * state.
  *
  * The initial ref-count of the object is 1. Use drm_dev_ref() and
  * drm_dev_unref() to take and drop further ref-counts.
@@ -566,6 +603,8 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
                ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
                if (ret)
                        goto err_minors;
+
+               WARN_ON(driver->suspend || driver->resume);
        }
 
        if (drm_core_check_feature(dev, DRIVER_RENDER)) {
@@ -672,6 +711,12 @@ EXPORT_SYMBOL(drm_dev_unref);
  *
  * Never call this twice on any device!
  *
+ * NOTE: To ensure backward compatibility with existing drivers, this
+ * function calls the ->load() method after registering the device nodes,
+ * creating race conditions. Usage of the ->load() methods is therefore
+ * deprecated, drivers must perform all initialization before calling
+ * drm_dev_register().
+ *
  * RETURNS:
  * 0 on success, negative error code on failure.
  */
@@ -719,6 +764,9 @@ EXPORT_SYMBOL(drm_dev_register);
  * Unregister the DRM device from the system. This does the reverse of
  * drm_dev_register() but does not deallocate the device. The caller must call
  * drm_dev_unref() to drop their final reference.
+ *
+ * This should be called first in the device teardown code to make sure
+ * userspace can't access the device instance any more.
  */
 void drm_dev_unregister(struct drm_device *dev)
 {
@@ -839,10 +887,9 @@ static int __init drm_core_init(void)
        if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
                goto err_p1;
 
-       drm_class = drm_sysfs_create(THIS_MODULE, "drm");
-       if (IS_ERR(drm_class)) {
+       ret = drm_sysfs_init();
+       if (ret < 0) {
                printk(KERN_ERR "DRM: Error creating drm class.\n");
-               ret = PTR_ERR(drm_class);
                goto err_p2;
        }
 
index 05bb7311ac5d151a1893b3802e333f28c8707df4..d895556be4f0339bddcc8beab09f8457a3842185 100644 (file)
@@ -2044,7 +2044,7 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
 static bool valid_inferred_mode(const struct drm_connector *connector,
                                const struct drm_display_mode *mode)
 {
-       struct drm_display_mode *m;
+       const struct drm_display_mode *m;
        bool ok = false;
 
        list_for_each_entry(m, &connector->probed_modes, head) {
@@ -3361,7 +3361,7 @@ EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
  * the sink doesn't support audio or video.
  */
 int drm_av_sync_delay(struct drm_connector *connector,
-                     struct drm_display_mode *mode)
+                     const struct drm_display_mode *mode)
 {
        int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
        int a, v;
@@ -3396,7 +3396,6 @@ EXPORT_SYMBOL(drm_av_sync_delay);
 /**
  * drm_select_eld - select one ELD from multiple HDMI/DP sinks
  * @encoder: the encoder just changed display mode
- * @mode: the adjusted display mode
  *
  * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
  * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
@@ -3404,8 +3403,7 @@ EXPORT_SYMBOL(drm_av_sync_delay);
  * Return: The connector associated with the first HDMI/DP sink that has ELD
  * attached to it.
  */
-struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
-                                    struct drm_display_mode *mode)
+struct drm_connector *drm_select_eld(struct drm_encoder *encoder)
 {
        struct drm_connector *connector;
        struct drm_device *dev = encoder->dev;
index c5605fe4907ecc9b0becf833a9f94fa16815738a..698b8c3b09d9cb8c310ca4da832d84e9c8b332a0 100644 (file)
@@ -32,7 +32,7 @@ MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
        "from built-in data or /lib/firmware instead. ");
 
 #define GENERIC_EDIDS 6
-static const char *generic_edid_name[GENERIC_EDIDS] = {
+static const char * const generic_edid_name[GENERIC_EDIDS] = {
        "edid/800x600.bin",
        "edid/1024x768.bin",
        "edid/1280x1024.bin",
@@ -264,20 +264,43 @@ out:
 int drm_load_edid_firmware(struct drm_connector *connector)
 {
        const char *connector_name = connector->name;
-       char *edidname = edid_firmware, *last, *colon;
+       char *edidname, *last, *colon, *fwstr, *edidstr, *fallback = NULL;
        int ret;
        struct edid *edid;
 
-       if (*edidname == '\0')
+       if (edid_firmware[0] == '\0')
                return 0;
 
-       colon = strchr(edidname, ':');
-       if (colon != NULL) {
-               if (strncmp(connector_name, edidname, colon - edidname))
-                       return 0;
-               edidname = colon + 1;
-               if (*edidname == '\0')
+       /*
+        * If there are multiple edid files specified and separated
+        * by commas, search through the list looking for one that
+        * matches the connector.
+        *
+        * If there's one or more that don't specify a connector, keep
+        * the last one found as a fallback.
+        */
+       fwstr = kstrdup(edid_firmware, GFP_KERNEL);
+       edidstr = fwstr;
+
+       while ((edidname = strsep(&edidstr, ","))) {
+               colon = strchr(edidname, ':');
+               if (colon != NULL) {
+                       if (strncmp(connector_name, edidname, colon - edidname))
+                               continue;
+                       edidname = colon + 1;
+                       break;
+               }
+
+               if (*edidname != '\0') /* corner case: multiple ',' */
+                       fallback = edidname;
+       }
+
+       if (!edidname) {
+               if (!fallback) {
+                       kfree(fwstr);
                        return 0;
+               }
+               edidname = fallback;
        }
 
        last = edidname + strlen(edidname) - 1;
@@ -285,6 +308,8 @@ int drm_load_edid_firmware(struct drm_connector *connector)
                *last = '\0';
 
        edid = edid_load(connector, edidname, connector_name);
+       kfree(fwstr);
+
        if (IS_ERR_OR_NULL(edid))
                return 0;
 
index ca08c472311bd3f6238f7513bc4ac26737228884..bd6d4ab2751250ab9af68f0aa9f91e0cb44463ce 100644 (file)
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+
+static bool drm_fbdev_emulation = true;
+module_param_named(fbdev_emulation, drm_fbdev_emulation, bool, 0600);
+MODULE_PARM_DESC(fbdev_emulation,
+                "Enable legacy fbdev emulation [default=true]");
 
 static LIST_HEAD(kernel_fb_helper_list);
 
@@ -99,6 +106,9 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
        struct drm_connector *connector;
        int i;
 
+       if (!drm_fbdev_emulation)
+               return 0;
+
        mutex_lock(&dev->mode_config.mutex);
        drm_for_each_connector(connector, dev) {
                struct drm_fb_helper_connector *fb_helper_connector;
@@ -129,6 +139,9 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
        struct drm_fb_helper_connector **temp;
        struct drm_fb_helper_connector *fb_helper_connector;
 
+       if (!drm_fbdev_emulation)
+               return 0;
+
        WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
        if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) {
                temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector *) * (fb_helper->connector_count + 1), GFP_KERNEL);
@@ -184,6 +197,9 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
        struct drm_fb_helper_connector *fb_helper_connector;
        int i, j;
 
+       if (!drm_fbdev_emulation)
+               return 0;
+
        WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
 
        for (i = 0; i < fb_helper->connector_count; i++) {
@@ -320,15 +336,96 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
 }
 EXPORT_SYMBOL(drm_fb_helper_debug_leave);
 
-static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
+static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
+{
+       struct drm_device *dev = fb_helper->dev;
+       struct drm_plane *plane;
+       struct drm_atomic_state *state;
+       int i, ret;
+
+       state = drm_atomic_state_alloc(dev);
+       if (!state)
+               return -ENOMEM;
+
+       state->acquire_ctx = dev->mode_config.acquire_ctx;
+retry:
+       drm_for_each_plane(plane, dev) {
+               struct drm_plane_state *plane_state;
+
+               plane->old_fb = plane->fb;
+
+               plane_state = drm_atomic_get_plane_state(state, plane);
+               if (IS_ERR(plane_state)) {
+                       ret = PTR_ERR(plane_state);
+                       goto fail;
+               }
+
+               ret = drm_atomic_plane_set_property(plane, plane_state,
+                               dev->mode_config.rotation_property,
+                               BIT(DRM_ROTATE_0));
+               if (ret != 0)
+                       goto fail;
+
+               /* disable non-primary: */
+               if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+                       continue;
+
+               ret = __drm_atomic_helper_disable_plane(plane, plane_state);
+               if (ret != 0)
+                       goto fail;
+       }
+
+       for(i = 0; i < fb_helper->crtc_count; i++) {
+               struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
+
+               ret = __drm_atomic_helper_set_config(mode_set, state);
+               if (ret != 0)
+                       goto fail;
+       }
+
+       ret = drm_atomic_commit(state);
+
+fail:
+       drm_for_each_plane(plane, dev) {
+               if (ret == 0) {
+                       struct drm_framebuffer *new_fb = plane->state->fb;
+                       if (new_fb)
+                               drm_framebuffer_reference(new_fb);
+                       plane->fb = new_fb;
+                       plane->crtc = plane->state->crtc;
+
+                       if (plane->old_fb)
+                               drm_framebuffer_unreference(plane->old_fb);
+               }
+               plane->old_fb = NULL;
+       }
+
+       if (ret == -EDEADLK)
+               goto backoff;
+
+       if (ret != 0)
+               drm_atomic_state_free(state);
+
+       return ret;
+
+backoff:
+       drm_atomic_state_clear(state);
+       drm_atomic_legacy_backoff(state);
+
+       goto retry;
+}
+
+static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
 {
        struct drm_device *dev = fb_helper->dev;
        struct drm_plane *plane;
-       bool error = false;
        int i;
 
        drm_warn_on_modeset_not_all_locked(dev);
 
+       if (fb_helper->atomic)
+               return restore_fbdev_mode_atomic(fb_helper);
+
        drm_for_each_plane(plane, dev) {
                if (plane->type != DRM_PLANE_TYPE_PRIMARY)
                        drm_plane_force_disable(plane);
@@ -348,18 +445,19 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
                if (crtc->funcs->cursor_set2) {
                        ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
                        if (ret)
-                               error = true;
+                               return ret;
                } else if (crtc->funcs->cursor_set) {
                        ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
                        if (ret)
-                               error = true;
+                               return ret;
                }
 
                ret = drm_mode_set_config_internal(mode_set);
                if (ret)
-                       error = true;
+                       return ret;
        }
-       return error;
+
+       return 0;
 }
 
 /**
@@ -369,12 +467,18 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
  * This should be called from driver's drm ->lastclose callback
  * when implementing an fbcon on top of kms using this helper. This ensures that
  * the user isn't greeted with a black screen when e.g. X dies.
+ *
+ * RETURNS:
+ * Zero if everything went ok, negative error code otherwise.
  */
-bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
 {
        struct drm_device *dev = fb_helper->dev;
-       bool ret;
-       bool do_delayed = false;
+       bool do_delayed;
+       int ret;
+
+       if (!drm_fbdev_emulation)
+               return -ENODEV;
 
        drm_modeset_lock_all(dev);
        ret = restore_fbdev_mode(fb_helper);
@@ -592,6 +696,9 @@ int drm_fb_helper_init(struct drm_device *dev,
        struct drm_crtc *crtc;
        int i;
 
+       if (!drm_fbdev_emulation)
+               return 0;
+
        if (!max_conn_count)
                return -EINVAL;
 
@@ -625,6 +732,8 @@ int drm_fb_helper_init(struct drm_device *dev,
                i++;
        }
 
+       fb_helper->atomic = !!drm_core_check_feature(dev, DRIVER_ATOMIC);
+
        return 0;
 out_free:
        drm_fb_helper_crtc_free(fb_helper);
@@ -714,6 +823,9 @@ EXPORT_SYMBOL(drm_fb_helper_release_fbi);
 
 void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
 {
+       if (!drm_fbdev_emulation)
+               return;
+
        if (!list_empty(&fb_helper->kernel_fb_list)) {
                list_del(&fb_helper->kernel_fb_list);
                if (list_empty(&kernel_fb_helper_list)) {
@@ -1122,6 +1234,57 @@ int drm_fb_helper_set_par(struct fb_info *info)
 }
 EXPORT_SYMBOL(drm_fb_helper_set_par);
 
+static int pan_display_atomic(struct fb_var_screeninfo *var,
+               struct fb_info *info)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+       struct drm_device *dev = fb_helper->dev;
+       struct drm_atomic_state *state;
+       int i, ret;
+
+       state = drm_atomic_state_alloc(dev);
+       if (!state)
+               return -ENOMEM;
+
+       state->acquire_ctx = dev->mode_config.acquire_ctx;
+retry:
+       for(i = 0; i < fb_helper->crtc_count; i++) {
+               struct drm_mode_set *mode_set;
+
+               mode_set = &fb_helper->crtc_info[i].mode_set;
+
+               mode_set->x = var->xoffset;
+               mode_set->y = var->yoffset;
+
+               ret = __drm_atomic_helper_set_config(mode_set, state);
+               if (ret != 0)
+                       goto fail;
+       }
+
+       ret = drm_atomic_commit(state);
+       if (ret != 0)
+               goto fail;
+
+       info->var.xoffset = var->xoffset;
+       info->var.yoffset = var->yoffset;
+
+       return 0;
+
+fail:
+       if (ret == -EDEADLK)
+               goto backoff;
+
+       drm_atomic_state_free(state);
+
+       return ret;
+
+backoff:
+       drm_atomic_state_clear(state);
+       drm_atomic_legacy_backoff(state);
+
+       goto retry;
+}
+
 /**
  * drm_fb_helper_pan_display - implementation for ->fb_pan_display
  * @var: updated screen information
@@ -1145,6 +1308,11 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
                return -EBUSY;
        }
 
+       if (fb_helper->atomic) {
+               ret = pan_display_atomic(var, info);
+               goto unlock;
+       }
+
        for (i = 0; i < fb_helper->crtc_count; i++) {
                modeset = &fb_helper->crtc_info[i].mode_set;
 
@@ -1159,6 +1327,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
                        }
                }
        }
+unlock:
        drm_modeset_unlock_all(dev);
        return ret;
 }
@@ -1934,6 +2103,9 @@ int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
        struct drm_device *dev = fb_helper->dev;
        int count = 0;
 
+       if (!drm_fbdev_emulation)
+               return 0;
+
        mutex_lock(&dev->mode_config.mutex);
        count = drm_fb_helper_probe_connector_modes(fb_helper,
                                                    dev->mode_config.max_width,
@@ -1977,6 +2149,9 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
        struct drm_device *dev = fb_helper->dev;
        u32 max_width, max_height;
 
+       if (!drm_fbdev_emulation)
+               return 0;
+
        mutex_lock(&fb_helper->dev->mode_config.mutex);
        if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) {
                fb_helper->delayed_hotplug = true;
index 059af01bd07a9916e560b772e4a5db872583a54b..43cbda3306ac060674b2ec6dfd233ace6a93be33 100644 (file)
@@ -73,7 +73,7 @@ int drm_authmagic(struct drm_device *dev, void *data,
 /* drm_sysfs.c */
 extern struct class *drm_class;
 
-struct class *drm_sysfs_create(struct module *owner, char *name);
+int drm_sysfs_init(void);
 void drm_sysfs_destroy(void);
 struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
 int drm_sysfs_connector_add(struct drm_connector *connector);
index ddfa6014c2c2e2cb0e9aa9b99a8fa41c7eb926f2..57676f8d7ecfe70c54fd79a3d714f196ff1406a5 100644 (file)
@@ -720,7 +720,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
        return 0;
 }
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
 typedef struct drm_agp_mode32 {
        u32 mode;       /**< AGP mode */
 } drm_agp_mode32_t;
@@ -882,7 +882,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
 
        return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
 }
-#endif                         /* __OS_HAS_AGP */
+#endif /* CONFIG_AGP */
 
 typedef struct drm_scatter_gather32 {
        u32 size;       /**< In bytes -- will round to page boundary */
@@ -1090,7 +1090,7 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx,
        [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx,
        [DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma,
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable,
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = compat_drm_agp_info,
        [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc,
index d93e7378c0779dd54be72447421aa49cc12c3f4f..530c501422fd696ef3c5a1c1e526728b9d709ed0 100644 (file)
@@ -40,7 +40,7 @@
 static int drm_version(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
 
-/**
+/*
  * Get the bus id.
  *
  * \param inode device inode.
@@ -75,7 +75,7 @@ drm_unset_busid(struct drm_device *dev,
        master->unique_len = 0;
 }
 
-/**
+/*
  * Set the bus id.
  *
  * \param inode device inode.
@@ -149,7 +149,7 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
        return 0;
 }
 
-/**
+/*
  * Get a mapping information.
  *
  * \param inode device inode.
@@ -201,7 +201,7 @@ static int drm_getmap(struct drm_device *dev, void *data,
        return 0;
 }
 
-/**
+/*
  * Get client information.
  *
  * \param inode device inode.
@@ -244,7 +244,7 @@ static int drm_getclient(struct drm_device *dev, void *data,
        }
 }
 
-/**
+/*
  * Get statistics information.
  *
  * \param inode device inode.
@@ -265,7 +265,7 @@ static int drm_getstats(struct drm_device *dev, void *data,
        return 0;
 }
 
-/**
+/*
  * Get device/driver capabilities
  */
 static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
@@ -318,7 +318,7 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
        return 0;
 }
 
-/**
+/*
  * Set device/driver capabilities
  */
 static int
@@ -352,7 +352,7 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
        return 0;
 }
 
-/**
+/*
  * Setversion ioctl.
  *
  * \param inode device inode.
@@ -406,7 +406,18 @@ done:
        return retcode;
 }
 
-/** No-op ioctl. */
+/**
+ * drm_noop - DRM no-op ioctl implementation
+ * @dev: DRM device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: DRM file for the ioctl call
+ *
+ * This no-op implementation for drm ioctls is useful for deprecated
+ * functionality where we can't return a failure code because existing userspace
+ * checks the result of the ioctl, but doesn't care about the action.
+ *
+ * Always returns successfully with 0.
+ */
 int drm_noop(struct drm_device *dev, void *data,
             struct drm_file *file_priv)
 {
@@ -416,6 +427,28 @@ int drm_noop(struct drm_device *dev, void *data,
 EXPORT_SYMBOL(drm_noop);
 
 /**
+ * drm_invalid_op - DRM invalid ioctl implementation
+ * @dev: DRM device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: DRM file for the ioctl call
+ *
+ * This no-op implementation for drm ioctls is useful for deprecated
+ * functionality where we really don't want to allow userspace to call the ioctl
+ * any more. This is the case for old ums interfaces for drivers that
+ * transitioned to kms gradually and so kept the old legacy tables around. This
+ * only applies to radeon and i915 kms drivers, other drivers shouldn't need to
+ * use this function.
+ *
+ * Always fails with a return value of -EINVAL.
+ */
+int drm_invalid_op(struct drm_device *dev, void *data,
+                  struct drm_file *file_priv)
+{
+       return -EINVAL;
+}
+EXPORT_SYMBOL(drm_invalid_op);
+
+/*
  * Copy an IOCTL return string to user space
  */
 static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
@@ -438,7 +471,7 @@ static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
        return 0;
 }
 
-/**
+/*
  * Get version information
  *
  * \param inode device inode.
@@ -470,7 +503,7 @@ static int drm_version(struct drm_device *dev, void *data,
        return err;
 }
 
-/**
+/*
  * drm_ioctl_permit - Check ioctl permissions against caller
  *
  * @flags: ioctl permission flags.
@@ -518,7 +551,7 @@ EXPORT_SYMBOL(drm_ioctl_permit);
                .name = #ioctl                  \
        }
 
-/** Ioctl table */
+/* Ioctl table */
 static const struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
                      DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
@@ -571,7 +604,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 
        DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -635,16 +668,16 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 #define DRM_CORE_IOCTL_COUNT   ARRAY_SIZE( drm_ioctls )
 
 /**
- * Called whenever a process performs an ioctl on /dev/drm.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
+ * drm_ioctl - ioctl callback implementation for DRM drivers
+ * @filp: file this ioctl is called on
+ * @cmd: ioctl cmd number
+ * @arg: user argument
  *
  * Looks up the ioctl function in the ::ioctls table, checking for root
  * privileges if so required, and dispatches to the respective function.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
  */
 long drm_ioctl(struct file *filp,
              unsigned int cmd, unsigned long arg)
@@ -754,9 +787,15 @@ EXPORT_SYMBOL(drm_ioctl);
 
 /**
  * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags
+ * @nr: ioctl number
+ * @flags: where to return the ioctl permission flags
+ *
+ * This ioctl is only used by the vmwgfx driver to augment the access checks
+ * done by the drm core and insofar a pretty decent layering violation. This
+ * shouldn't be used by any drivers.
  *
- * @nr: Ioctl number.
- * @flags: Where to return the ioctl permission flags
+ * Returns:
+ * True if the @nr corresponds to a DRM core ioctl number, false otherwise.
  */
 bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
 {
index 22d207e211e719b76b17c2a470d8323bb73ae6ba..7bdf247b9681b59421b73cce7bb6c5e015073928 100644 (file)
@@ -74,22 +74,22 @@ module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
 module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
 module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
 
-static void store_vblank(struct drm_device *dev, int crtc,
+static void store_vblank(struct drm_device *dev, unsigned int pipe,
                         u32 vblank_count_inc,
-                        struct timeval *t_vblank)
+                        struct timeval *t_vblank, u32 last)
 {
-       struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+       struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
        u32 tslot;
 
        assert_spin_locked(&dev->vblank_time_lock);
 
-       if (t_vblank) {
-               /* All writers hold the spinlock, but readers are serialized by
-                * the latching of vblank->count below.
-                */
-               tslot = vblank->count + vblank_count_inc;
-               vblanktimestamp(dev, crtc, tslot) = *t_vblank;
-       }
+       vblank->last = last;
+
+       /* All writers hold the spinlock, but readers are serialized by
+        * the latching of vblank->count below.
+        */
+       tslot = vblank->count + vblank_count_inc;
+       vblanktimestamp(dev, pipe, tslot) = *t_vblank;
 
        /*
         * vblank timestamp updates are protected on the write side with
@@ -104,13 +104,61 @@ static void store_vblank(struct drm_device *dev, int crtc,
        smp_wmb();
 }
 
+/**
+ * drm_reset_vblank_timestamp - reset the last timestamp to the last vblank
+ * @dev: DRM device
+ * @pipe: index of CRTC for which to reset the timestamp
+ *
+ * Reset the stored timestamp for the current vblank count to correspond
+ * to the last vblank occurred.
+ *
+ * Only to be called from drm_vblank_on().
+ *
+ * Note: caller must hold dev->vbl_lock since this reads & writes
+ * device vblank fields.
+ */
+static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe)
+{
+       u32 cur_vblank;
+       bool rc;
+       struct timeval t_vblank;
+       int count = DRM_TIMESTAMP_MAXRETRIES;
+
+       spin_lock(&dev->vblank_time_lock);
+
+       /*
+        * sample the current counter to avoid random jumps
+        * when drm_vblank_enable() applies the diff
+        */
+       do {
+               cur_vblank = dev->driver->get_vblank_counter(dev, pipe);
+               rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, 0);
+       } while (cur_vblank != dev->driver->get_vblank_counter(dev, pipe) && --count > 0);
+
+       /*
+        * Only reinitialize corresponding vblank timestamp if high-precision query
+        * available and didn't fail. Otherwise reinitialize delayed at next vblank
+        * interrupt and assign 0 for now, to mark the vblanktimestamp as invalid.
+        */
+       if (!rc)
+               t_vblank = (struct timeval) {0, 0};
+
+       /*
+        * +1 to make sure user will never see the same
+        * vblank counter value before and after a modeset
+        */
+       store_vblank(dev, pipe, 1, &t_vblank, cur_vblank);
+
+       spin_unlock(&dev->vblank_time_lock);
+}
+
 /**
  * drm_update_vblank_count - update the master vblank counter
  * @dev: DRM device
  * @pipe: counter to update
  *
  * Call back into the driver to update the appropriate vblank counter
- * (specified by @crtc).  Deal with wraparound, if it occurred, and
+ * (specified by @pipe).  Deal with wraparound, if it occurred, and
  * update the last read value so we can deal with wraparound on the next
  * call if necessary.
  *
@@ -120,12 +168,15 @@ static void store_vblank(struct drm_device *dev, int crtc,
  * Note: caller must hold dev->vbl_lock since this reads & writes
  * device vblank fields.
  */
-static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe)
+static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
+                                   unsigned long flags)
 {
        struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
        u32 cur_vblank, diff;
        bool rc;
        struct timeval t_vblank;
+       int count = DRM_TIMESTAMP_MAXRETRIES;
+       int framedur_ns = vblank->framedur_ns;
 
        /*
         * Interrupts were disabled prior to this call, so deal with counter
@@ -141,33 +192,54 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe)
         */
        do {
                cur_vblank = dev->driver->get_vblank_counter(dev, pipe);
-               rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, 0);
-       } while (cur_vblank != dev->driver->get_vblank_counter(dev, pipe));
+               rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, flags);
+       } while (cur_vblank != dev->driver->get_vblank_counter(dev, pipe) && --count > 0);
+
+       if (dev->max_vblank_count != 0) {
+               /* trust the hw counter when it's around */
+               diff = (cur_vblank - vblank->last) & dev->max_vblank_count;
+       } else if (rc && framedur_ns) {
+               const struct timeval *t_old;
+               u64 diff_ns;
 
-       /* Deal with counter wrap */
-       diff = cur_vblank - vblank->last;
-       if (cur_vblank < vblank->last) {
-               diff += dev->max_vblank_count + 1;
+               t_old = &vblanktimestamp(dev, pipe, vblank->count);
+               diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
+
+               /*
+                * Figure out how many vblanks we've missed based
+                * on the difference in the timestamps and the
+                * frame/field duration.
+                */
+               diff = DIV_ROUND_CLOSEST_ULL(diff_ns, framedur_ns);
 
-               DRM_DEBUG("last_vblank[%u]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
-                         pipe, vblank->last, cur_vblank, diff);
+               if (diff == 0 && flags & DRM_CALLED_FROM_VBLIRQ)
+                       DRM_DEBUG("crtc %u: Redundant vblirq ignored."
+                                 " diff_ns = %lld, framedur_ns = %d)\n",
+                                 pipe, (long long) diff_ns, framedur_ns);
+       } else {
+               /* some kind of default for drivers w/o accurate vbl timestamping */
+               diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
        }
 
-       DRM_DEBUG("updating vblank count on crtc %u, missed %d\n",
-                 pipe, diff);
+       DRM_DEBUG("updating vblank count on crtc %u:"
+                 " current=%u, diff=%u, hw=%u hw_last=%u\n",
+                 pipe, vblank->count, diff, cur_vblank, vblank->last);
 
-       if (diff == 0)
+       if (diff == 0) {
+               WARN_ON_ONCE(cur_vblank != vblank->last);
                return;
+       }
 
        /*
         * Only reinitialize corresponding vblank timestamp if high-precision query
-        * available and didn't fail. Otherwise reinitialize delayed at next vblank
-        * interrupt and assign 0 for now, to mark the vblanktimestamp as invalid.
+        * available and didn't fail, or we were called from the vblank interrupt.
+        * Otherwise reinitialize delayed at next vblank interrupt and assign 0
+        * for now, to mark the vblanktimestamp as invalid.
         */
-       if (!rc)
+       if (!rc && (flags & DRM_CALLED_FROM_VBLIRQ) == 0)
                t_vblank = (struct timeval) {0, 0};
 
-       store_vblank(dev, pipe, diff, &t_vblank);
+       store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
 }
 
 /*
@@ -180,11 +252,6 @@ static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
        unsigned long irqflags;
-       u32 vblcount;
-       s64 diff_ns;
-       bool vblrc;
-       struct timeval tvblank;
-       int count = DRM_TIMESTAMP_MAXRETRIES;
 
        /* Prevent vblank irq processing while disabling vblank irqs,
         * so no updates of timestamps or count can happen after we've
@@ -192,26 +259,6 @@ static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
         */
        spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
 
-       /*
-        * If the vblank interrupt was already disabled update the count
-        * and timestamp to maintain the appearance that the counter
-        * has been ticking all along until this time. This makes the
-        * count account for the entire time between drm_vblank_on() and
-        * drm_vblank_off().
-        *
-        * But only do this if precise vblank timestamps are available.
-        * Otherwise we might read a totally bogus timestamp since drivers
-        * lacking precise timestamp support rely upon sampling the system clock
-        * at vblank interrupt time. Which obviously won't work out well if the
-        * vblank interrupt is disabled.
-        */
-       if (!vblank->enabled &&
-           drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0)) {
-               drm_update_vblank_count(dev, pipe);
-               spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
-               return;
-       }
-
        /*
         * Only disable vblank interrupts if they're enabled. This avoids
         * calling the ->disable_vblank() operation in atomic context with the
@@ -222,47 +269,13 @@ static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
                vblank->enabled = false;
        }
 
-       /* No further vblank irq's will be processed after
-        * this point. Get current hardware vblank count and
-        * vblank timestamp, repeat until they are consistent.
-        *
-        * FIXME: There is still a race condition here and in
-        * drm_update_vblank_count() which can cause off-by-one
-        * reinitialization of software vblank counter. If gpu
-        * vblank counter doesn't increment exactly at the leading
-        * edge of a vblank interval, then we can lose 1 count if
-        * we happen to execute between start of vblank and the
-        * delayed gpu counter increment.
-        */
-       do {
-               vblank->last = dev->driver->get_vblank_counter(dev, pipe);
-               vblrc = drm_get_last_vbltimestamp(dev, pipe, &tvblank, 0);
-       } while (vblank->last != dev->driver->get_vblank_counter(dev, pipe) && (--count) && vblrc);
-
-       if (!count)
-               vblrc = 0;
-
-       /* Compute time difference to stored timestamp of last vblank
-        * as updated by last invocation of drm_handle_vblank() in vblank irq.
-        */
-       vblcount = vblank->count;
-       diff_ns = timeval_to_ns(&tvblank) -
-                 timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount));
-
-       /* If there is at least 1 msec difference between the last stored
-        * timestamp and tvblank, then we are currently executing our
-        * disable inside a new vblank interval, the tvblank timestamp
-        * corresponds to this new vblank interval and the irq handler
-        * for this vblank didn't run yet and won't run due to our disable.
-        * Therefore we need to do the job of drm_handle_vblank() and
-        * increment the vblank counter by one to account for this vblank.
-        *
-        * Skip this step if there isn't any high precision timestamp
-        * available. In that case we can't account for this and just
-        * hope for the best.
+       /*
+        * Always update the count and timestamp to maintain the
+        * appearance that the counter has been ticking all along until
+        * this time. This makes the count account for the entire time
+        * between drm_vblank_on() and drm_vblank_off().
         */
-       if (vblrc && (abs64(diff_ns) > 1000000))
-               store_vblank(dev, pipe, 1, &tvblank);
+       drm_update_vblank_count(dev, pipe, 0);
 
        spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 }
@@ -603,7 +616,8 @@ int drm_control(struct drm_device *dev, void *data,
 void drm_calc_timestamping_constants(struct drm_crtc *crtc,
                                     const struct drm_display_mode *mode)
 {
-       int linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+       struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
+       int linedur_ns = 0, framedur_ns = 0;
        int dotclock = mode->crtc_clock;
 
        /* Valid dotclock? */
@@ -612,10 +626,9 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
 
                /*
                 * Convert scanline length in pixels and video
-                * dot clock to line duration, frame duration
-                * and pixel duration in nanoseconds:
+                * dot clock to line duration and frame duration
+                * in nanoseconds:
                 */
-               pixeldur_ns = 1000000 / dotclock;
                linedur_ns  = div_u64((u64) mode->crtc_htotal * 1000000, dotclock);
                framedur_ns = div_u64((u64) frame_size * 1000000, dotclock);
 
@@ -628,16 +641,14 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
                DRM_ERROR("crtc %u: Can't calculate constants, dotclock = 0!\n",
                          crtc->base.id);
 
-       crtc->pixeldur_ns = pixeldur_ns;
-       crtc->linedur_ns  = linedur_ns;
-       crtc->framedur_ns = framedur_ns;
+       vblank->linedur_ns  = linedur_ns;
+       vblank->framedur_ns = framedur_ns;
 
        DRM_DEBUG("crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
                  crtc->base.id, mode->crtc_htotal,
                  mode->crtc_vtotal, mode->crtc_vdisplay);
-       DRM_DEBUG("crtc %u: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
-                 crtc->base.id, dotclock, framedur_ns,
-                 linedur_ns, pixeldur_ns);
+       DRM_DEBUG("crtc %u: clock %d kHz framedur %d linedur %d\n",
+                 crtc->base.id, dotclock, framedur_ns, linedur_ns);
 }
 EXPORT_SYMBOL(drm_calc_timestamping_constants);
 
@@ -651,7 +662,6 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
  * @flags: Flags to pass to driver:
  *         0 = Default,
  *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl IRQ handler
- * @refcrtc: CRTC which defines scanout timing
  * @mode: mode which defines the scanout timings
  *
  * Implements calculation of exact vblank timestamps from given drm_display_mode
@@ -692,15 +702,14 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
                                          int *max_error,
                                          struct timeval *vblank_time,
                                          unsigned flags,
-                                         const struct drm_crtc *refcrtc,
                                          const struct drm_display_mode *mode)
 {
        struct timeval tv_etime;
        ktime_t stime, etime;
-       int vbl_status;
+       unsigned int vbl_status;
+       int ret = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
        int vpos, hpos, i;
-       int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
-       bool invbl;
+       int delta_ns, duration_ns;
 
        if (pipe >= dev->num_crtcs) {
                DRM_ERROR("Invalid crtc %u\n", pipe);
@@ -713,15 +722,10 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
                return -EIO;
        }
 
-       /* Durations of frames, lines, pixels in nanoseconds. */
-       framedur_ns = refcrtc->framedur_ns;
-       linedur_ns  = refcrtc->linedur_ns;
-       pixeldur_ns = refcrtc->pixeldur_ns;
-
        /* If mode timing undefined, just return as no-op:
         * Happens during initial modesetting of a crtc.
         */
-       if (framedur_ns == 0) {
+       if (mode->crtc_clock == 0) {
                DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe);
                return -EAGAIN;
        }
@@ -738,12 +742,14 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
                 * Get vertical and horizontal scanout position vpos, hpos,
                 * and bounding timestamps stime, etime, pre/post query.
                 */
-               vbl_status = dev->driver->get_scanout_position(dev, pipe, flags, &vpos,
-                                                              &hpos, &stime, &etime);
+               vbl_status = dev->driver->get_scanout_position(dev, pipe, flags,
+                                                              &vpos, &hpos,
+                                                              &stime, &etime,
+                                                              mode);
 
                /* Return as no-op if scanout query unsupported or failed. */
                if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
-                       DRM_DEBUG("crtc %u : scanoutpos query failed [%d].\n",
+                       DRM_DEBUG("crtc %u : scanoutpos query failed [0x%x].\n",
                                  pipe, vbl_status);
                        return -EIO;
                }
@@ -770,13 +776,15 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
         * within vblank area, counting down the number of lines until
         * start of scanout.
         */
-       invbl = vbl_status & DRM_SCANOUTPOS_IN_VBLANK;
+       if (vbl_status & DRM_SCANOUTPOS_IN_VBLANK)
+               ret |= DRM_VBLANKTIME_IN_VBLANK;
 
        /* Convert scanout position into elapsed time at raw_time query
         * since start of scanout at first display scanline. delta_ns
         * can be negative if start of scanout hasn't happened yet.
         */
-       delta_ns = vpos * linedur_ns + hpos * pixeldur_ns;
+       delta_ns = div_s64(1000000LL * (vpos * mode->crtc_htotal + hpos),
+                          mode->crtc_clock);
 
        if (!drm_timestamp_monotonic)
                etime = ktime_mono_to_real(etime);
@@ -792,17 +800,13 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
                etime = ktime_sub_ns(etime, delta_ns);
        *vblank_time = ktime_to_timeval(etime);
 
-       DRM_DEBUG("crtc %u : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
-                 pipe, (int)vbl_status, hpos, vpos,
+       DRM_DEBUG("crtc %u : v 0x%x p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
+                 pipe, vbl_status, hpos, vpos,
                  (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
                  (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
                  duration_ns/1000, i);
 
-       vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
-       if (invbl)
-               vbl_status |= DRM_VBLANKTIME_IN_VBLANK;
-
-       return vbl_status;
+       return ret;
 }
 EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
 
@@ -873,7 +877,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
  * Returns:
  * The software vblank counter.
  */
-u32 drm_vblank_count(struct drm_device *dev, int pipe)
+u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
@@ -914,11 +918,14 @@ EXPORT_SYMBOL(drm_crtc_vblank_count);
  * vblank events since the system was booted, including lost events due to
  * modesetting activity. Returns corresponding system timestamp of the time
  * of the vblank interval that corresponds to the current vblank counter value.
+ *
+ * This is the legacy version of drm_crtc_vblank_count_and_time().
  */
 u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
                              struct timeval *vblanktime)
 {
        struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+       int count = DRM_TIMESTAMP_MAXRETRIES;
        u32 cur_vblank;
 
        if (WARN_ON(pipe >= dev->num_crtcs))
@@ -934,12 +941,33 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
                smp_rmb();
                *vblanktime = vblanktimestamp(dev, pipe, cur_vblank);
                smp_rmb();
-       } while (cur_vblank != vblank->count);
+       } while (cur_vblank != vblank->count && --count > 0);
 
        return cur_vblank;
 }
 EXPORT_SYMBOL(drm_vblank_count_and_time);
 
+/**
+ * drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value
+ *     and the system timestamp corresponding to that vblank counter value
+ * @crtc: which counter to retrieve
+ * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Returns corresponding system timestamp of the time
+ * of the vblank interval that corresponds to the current vblank counter value.
+ *
+ * This is the native KMS version of drm_vblank_count_and_time().
+ */
+u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
+                                  struct timeval *vblanktime)
+{
+       return drm_vblank_count_and_time(crtc->dev, drm_crtc_index(crtc),
+                                        vblanktime);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_count_and_time);
+
 static void send_vblank_event(struct drm_device *dev,
                struct drm_pending_vblank_event *e,
                unsigned long seq, struct timeval *now)
@@ -1033,7 +1061,7 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
                        atomic_dec(&vblank->refcount);
                else {
                        vblank->enabled = true;
-                       drm_update_vblank_count(dev, pipe);
+                       drm_update_vblank_count(dev, pipe, 0);
                }
        }
 
@@ -1154,8 +1182,8 @@ EXPORT_SYMBOL(drm_crtc_vblank_put);
  * @dev: DRM device
  * @pipe: CRTC index
  *
- * This waits for one vblank to pass on @crtc, using the irq driver interfaces.
- * It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
+ * This waits for one vblank to pass on @pipe, using the irq driver interfaces.
+ * It is a failure to call this when the vblank irq for @pipe is disabled, e.g.
  * due to lack of driver support or because the crtc is off.
  */
 void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
@@ -1276,7 +1304,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_off);
 
 /**
  * drm_crtc_vblank_reset - reset vblank state to off on a CRTC
- * @drm_crtc: CRTC in question
+ * @crtc: CRTC in question
  *
  * Drivers can use this function to reset the vblank state to off at load time.
  * Drivers should use this together with the drm_crtc_vblank_off() and
@@ -1284,12 +1312,12 @@ EXPORT_SYMBOL(drm_crtc_vblank_off);
  * drm_crtc_vblank_off() is that this function doesn't save the vblank counter
  * and hence doesn't need to call any driver hooks.
  */
-void drm_crtc_vblank_reset(struct drm_crtc *drm_crtc)
+void drm_crtc_vblank_reset(struct drm_crtc *crtc)
 {
-       struct drm_device *dev = drm_crtc->dev;
+       struct drm_device *dev = crtc->dev;
        unsigned long irqflags;
-       int crtc = drm_crtc_index(drm_crtc);
-       struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+       unsigned int pipe = drm_crtc_index(crtc);
+       struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
        spin_lock_irqsave(&dev->vbl_lock, irqflags);
        /*
@@ -1333,16 +1361,8 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
                vblank->inmodeset = 0;
        }
 
-       /*
-        * sample the current counter to avoid random jumps
-        * when drm_vblank_enable() applies the diff
-        *
-        * -1 to make sure user will never see the same
-        * vblank counter value before and after a modeset
-        */
-       vblank->last =
-               (dev->driver->get_vblank_counter(dev, pipe) - 1) &
-               dev->max_vblank_count;
+       drm_reset_vblank_timestamp(dev, pipe);
+
        /*
         * re-enable interrupts if there are users left, or the
         * user wishes vblank interrupts to be enabled all the time.
@@ -1725,9 +1745,6 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
 bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-       u32 vblcount;
-       s64 diff_ns;
-       struct timeval tvblank;
        unsigned long irqflags;
 
        if (WARN_ON_ONCE(!dev->num_crtcs))
@@ -1751,32 +1768,7 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
                return false;
        }
 
-       /* Fetch corresponding timestamp for this vblank interval from
-        * driver and store it in proper slot of timestamp ringbuffer.
-        */
-
-       /* Get current timestamp and count. */
-       vblcount = vblank->count;
-       drm_get_last_vbltimestamp(dev, pipe, &tvblank, DRM_CALLED_FROM_VBLIRQ);
-
-       /* Compute time difference to timestamp of last vblank */
-       diff_ns = timeval_to_ns(&tvblank) -
-                 timeval_to_ns(&vblanktimestamp(dev, pipe, vblcount));
-
-       /* Update vblank timestamp and count if at least
-        * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
-        * difference between last stored timestamp and current
-        * timestamp. A smaller difference means basically
-        * identical timestamps. Happens if this vblank has
-        * been already processed and this is a redundant call,
-        * e.g., due to spurious vblank interrupts. We need to
-        * ignore those for accounting.
-        */
-       if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS)
-               store_vblank(dev, pipe, 1, &tvblank);
-       else
-               DRM_DEBUG("crtc %u: Redundant vblirq ignored. diff_ns = %d\n",
-                         pipe, (int) diff_ns);
+       drm_update_vblank_count(dev, pipe, DRM_CALLED_FROM_VBLIRQ);
 
        spin_unlock(&dev->vblank_time_lock);
 
@@ -1806,3 +1798,20 @@ bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
        return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
 }
 EXPORT_SYMBOL(drm_crtc_handle_vblank);
+
+/**
+ * drm_vblank_no_hw_counter - "No hw counter" implementation of .get_vblank_counter()
+ * @dev: DRM device
+ * @pipe: CRTC for which to read the counter
+ *
+ * Drivers can plug this into the .get_vblank_counter() function if
+ * there is no useable hardware frame counter available.
+ *
+ * Returns:
+ * 0
+ */
+u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe)
+{
+       return 0;
+}
+EXPORT_SYMBOL(drm_vblank_no_hw_counter);
index a521ef6ff8072ab7e65508b5da98b84dc058bfa0..87a8cb73366f258697e0001db3260dd93e19e294 100644 (file)
@@ -38,7 +38,7 @@
 #include <drm/drmP.h>
 #include "drm_legacy.h"
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
 
 #ifdef HAVE_PAGE_AGP
 # include <asm/agp.h>
@@ -111,14 +111,14 @@ int drm_unbind_agp(struct agp_memory * handle)
        return agp_unbind_memory(handle);
 }
 
-#else  /*  __OS_HAS_AGP  */
+#else /*  CONFIG_AGP  */
 static inline void *agp_remap(unsigned long offset, unsigned long size,
                              struct drm_device * dev)
 {
        return NULL;
 }
 
-#endif                         /* agp */
+#endif /* CONFIG_AGP */
 
 void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev)
 {
index 3427b115e2bb895e0e54a7d21c16efad41210d73..04de6fd88f8c3a7f5d503c45768582a10a71efd4 100644 (file)
@@ -267,12 +267,12 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
        if (adj_end > end)
                adj_end = end;
 
-       if (flags & DRM_MM_CREATE_TOP)
-               adj_start = adj_end - size;
-
        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
+       if (flags & DRM_MM_CREATE_TOP)
+               adj_start = adj_end - size;
+
        if (alignment) {
                u64 tmp = adj_start;
                unsigned rem;
index fba321ca434485d0f6f6d7898c84711eea47a577..6675b14284105cd639fb7fbb7ee9967cfcc38aa9 100644 (file)
@@ -307,6 +307,8 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
        WARN_ON(ctx->contended);
 
        if (ctx->trylock_only) {
+               lockdep_assert_held(&ctx->ww_ctx);
+
                if (!ww_mutex_trylock(&lock->mutex))
                        return -EBUSY;
                else
index 1b1bd42b03687683202eb600425c5d7e582c61ad..fcd2a86acd2cd66bf7faf49e21fd330088242e34 100644 (file)
@@ -266,6 +266,9 @@ void drm_pci_agp_destroy(struct drm_device *dev)
  * then register the character device and inter module information.
  * Try and register, if we fail to register, backout previous work.
  *
+ * NOTE: This function is deprecated, please use drm_dev_alloc() and
+ * drm_dev_register() instead and remove your ->load() callback.
+ *
  * Return: 0 on success or a negative error code on failure.
  */
 int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
@@ -326,6 +329,10 @@ EXPORT_SYMBOL(drm_get_pci_dev);
  * Initializes a drm_device structures, registering the stubs and initializing
  * the AGP device.
  *
+ * NOTE: This function is deprecated. Modern modesetting drm drivers should use
+ * pci_register_driver() directly, this function only provides shadow-binding
+ * support for old legacy drivers on top of that core pci function.
+ *
  * Return: 0 on success or a negative error code on failure.
  */
 int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
@@ -435,6 +442,10 @@ EXPORT_SYMBOL(drm_pci_init);
  *
  * Unregisters one or more devices matched by a PCI driver from the DRM
  * subsystem.
+ *
+ * NOTE: This function is deprecated. Modern modesetting drm drivers should use
+ * pci_unregister_driver() directly, this function only provides shadow-binding
+ * support for old legacy drivers on top of that core pci function.
  */
 void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
 {
index 5e5a07af02c85c4297df213847759bd620a82390..d384ebcf0aaf52a63125c9599553fc24e6a7f87b 100644 (file)
@@ -426,7 +426,7 @@ int drm_plane_helper_commit(struct drm_plane *plane,
 
        if (plane_funcs->prepare_fb && plane_state->fb &&
            plane_state->fb != old_fb) {
-               ret = plane_funcs->prepare_fb(plane, plane_state->fb,
+               ret = plane_funcs->prepare_fb(plane,
                                              plane_state);
                if (ret)
                        goto out;
@@ -479,8 +479,8 @@ int drm_plane_helper_commit(struct drm_plane *plane,
                ret = 0;
        }
 
-       if (plane_funcs->cleanup_fb && old_fb)
-               plane_funcs->cleanup_fb(plane, old_fb, plane_state);
+       if (plane_funcs->cleanup_fb)
+               plane_funcs->cleanup_fb(plane, plane_state);
 out:
        if (plane_state) {
                if (plane->funcs->atomic_destroy_state)
index 5314c9d5fef473daeb14677f08497780e87ef987..644169e1a0296fe6fa07c91cc3c8520bf9588c4a 100644 (file)
@@ -95,6 +95,9 @@ EXPORT_SYMBOL(drm_platform_set_busid);
  * subsystem, initializing a drm_device structure and calling the driver's
  * .load() function.
  *
+ * NOTE: This function is deprecated, please use drm_dev_alloc() and
+ * drm_dev_register() instead and remove your ->load() callback.
+ *
  * Return: 0 on success or a negative error code on failure.
  */
 int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device)
index 631f5afd451c2bed39e94bde3e2ad71483c6e0ae..531ac4cc9756deb9e51f12fd4aefcff46a5c72b7 100644 (file)
@@ -330,7 +330,7 @@ void drm_rect_rotate(struct drm_rect *r,
                }
        }
 
-       switch (rotation & 0xf) {
+       switch (rotation & DRM_ROTATE_MASK) {
        case BIT(DRM_ROTATE_0):
                break;
        case BIT(DRM_ROTATE_90):
@@ -390,7 +390,7 @@ void drm_rect_rotate_inv(struct drm_rect *r,
 {
        struct drm_rect tmp;
 
-       switch (rotation & 0xf) {
+       switch (rotation & DRM_ROTATE_MASK) {
        case BIT(DRM_ROTATE_0):
                break;
        case BIT(DRM_ROTATE_90):
index 0f6cd33b531f104f5094513a0992eb99361e65b3..615b7e667320184df169765862df055f36d7e0fd 100644 (file)
@@ -30,6 +30,8 @@ static struct device_type drm_sysfs_device_minor = {
        .name = "drm_minor"
 };
 
+struct class *drm_class;
+
 /**
  * __drm_class_suspend - internal DRM class suspend routine
  * @dev: Linux device to suspend
@@ -112,41 +114,34 @@ static CLASS_ATTR_STRING(version, S_IRUGO,
                CORE_DATE);
 
 /**
- * drm_sysfs_create - create a struct drm_sysfs_class structure
- * @owner: pointer to the module that is to "own" this struct drm_sysfs_class
- * @name: pointer to a string for the name of this class.
+ * drm_sysfs_init - initialize sysfs helpers
+ *
+ * This is used to create the DRM class, which is the implicit parent of any
+ * other top-level DRM sysfs objects.
  *
- * This is used to create DRM class pointer that can then be used
- * in calls to drm_sysfs_device_add().
+ * You must call drm_sysfs_destroy() to release the allocated resources.
  *
- * Note, the pointer created here is to be destroyed when finished by making a
- * call to drm_sysfs_destroy().
+ * Return: 0 on success, negative error code on failure.
  */
-struct class *drm_sysfs_create(struct module *owner, char *name)
+int drm_sysfs_init(void)
 {
-       struct class *class;
        int err;
 
-       class = class_create(owner, name);
-       if (IS_ERR(class)) {
-               err = PTR_ERR(class);
-               goto err_out;
-       }
-
-       class->pm = &drm_class_dev_pm_ops;
-
-       err = class_create_file(class, &class_attr_version.attr);
-       if (err)
-               goto err_out_class;
+       drm_class = class_create(THIS_MODULE, "drm");
+       if (IS_ERR(drm_class))
+               return PTR_ERR(drm_class);
 
-       class->devnode = drm_devnode;
+       drm_class->pm = &drm_class_dev_pm_ops;
 
-       return class;
+       err = class_create_file(drm_class, &class_attr_version.attr);
+       if (err) {
+               class_destroy(drm_class);
+               drm_class = NULL;
+               return err;
+       }
 
-err_out_class:
-       class_destroy(class);
-err_out:
-       return ERR_PTR(err);
+       drm_class->devnode = drm_devnode;
+       return 0;
 }
 
 /**
@@ -156,7 +151,7 @@ err_out:
  */
 void drm_sysfs_destroy(void)
 {
-       if ((drm_class == NULL) || (IS_ERR(drm_class)))
+       if (IS_ERR_OR_NULL(drm_class))
                return;
        class_remove_file(drm_class, &class_attr_version.attr);
        class_destroy(drm_class);
@@ -235,18 +230,12 @@ static ssize_t dpms_show(struct device *device,
                           char *buf)
 {
        struct drm_connector *connector = to_drm_connector(device);
-       struct drm_device *dev = connector->dev;
-       uint64_t dpms_status;
-       int ret;
+       int dpms;
 
-       ret = drm_object_property_get_value(&connector->base,
-                                           dev->mode_config.dpms_property,
-                                           &dpms_status);
-       if (ret)
-               return 0;
+       dpms = READ_ONCE(connector->dpms);
 
        return snprintf(buf, PAGE_SIZE, "%s\n",
-                       drm_get_dpms_name((int)dpms_status));
+                       drm_get_dpms_name(dpms));
 }
 
 static ssize_t enabled_show(struct device *device,
index aab49ee4ed40d2ce5b525554209fd3ffe40340b4..f90bd5fe35babbc6b6eb216cefd0f0dad054932d 100644 (file)
@@ -95,7 +95,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
  * Find the right map and if it's AGP memory find the real physical page to
  * map, get the page, increment the use count and return it.
  */
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
 static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct drm_file *priv = vma->vm_file->private_data;
@@ -168,12 +168,12 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 vm_fault_error:
        return VM_FAULT_SIGBUS; /* Disallow mremap */
 }
-#else                          /* __OS_HAS_AGP */
+#else
 static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        return VM_FAULT_SIGBUS;
 }
-#endif                         /* __OS_HAS_AGP */
+#endif
 
 /**
  * \c nopage method for shared virtual memory.
@@ -556,7 +556,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
         * --BenH.
         */
        if (!vma->vm_pgoff
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
 #endif
index ed28823d3b35ef704a5dded0c1c55c1eff6ef3ed..50dec0df160fc270d710486e95c4ed91584f04a5 100644 (file)
@@ -152,7 +152,7 @@ err_crtc:
        return ERR_PTR(ret);
 }
 
-int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
+int exynos_drm_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct exynos_drm_private *private = dev->dev_private;
        struct exynos_drm_crtc *exynos_crtc =
@@ -164,7 +164,7 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
        return 0;
 }
 
-void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
+void exynos_drm_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct exynos_drm_private *private = dev->dev_private;
        struct exynos_drm_crtc *exynos_crtc =
index f87d4abda6f7b5ca0f69712a887acd065f252d69..f9f365bd025765f998fcbc8c8cd205fcab92e929 100644 (file)
@@ -23,8 +23,8 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
                                        enum exynos_drm_output_type type,
                                        const struct exynos_drm_crtc_ops *ops,
                                        void *context);
-int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
-void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
+int exynos_drm_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe);
+void exynos_drm_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe);
 void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc);
 void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
                                   struct exynos_drm_plane *exynos_plane);
index ae9e6b2d3758a97104ac6be69f1a970e6c0f3bb6..d8568af2b4144c65915d17030e72e11f3a772437 100644 (file)
@@ -105,7 +105,7 @@ static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
                atomic_inc(&exynos_crtc->pending_update);
        }
 
-       drm_atomic_helper_commit_planes(dev, state);
+       drm_atomic_helper_commit_planes(dev, state, false);
 
        exynos_atomic_wait_for_commit(state);
 
@@ -449,7 +449,7 @@ static struct drm_driver exynos_drm_driver = {
        .lastclose              = exynos_drm_lastclose,
        .postclose              = exynos_drm_postclose,
        .set_busid              = drm_platform_set_busid,
-       .get_vblank_counter     = drm_vblank_count,
+       .get_vblank_counter     = drm_vblank_no_hw_counter,
        .enable_vblank          = exynos_drm_crtc_enable_vblank,
        .disable_vblank         = exynos_drm_crtc_disable_vblank,
        .gem_free_object        = exynos_drm_gem_free_object,
index 9a8e2da47158b7f989ef8df16f2223bb57868b27..1930234ba5f13596f3b998d737c88e987b3370e7 100644 (file)
@@ -140,7 +140,7 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-static int fsl_dcu_drm_enable_vblank(struct drm_device *dev, int crtc)
+static int fsl_dcu_drm_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
        unsigned int value;
@@ -156,7 +156,8 @@ static int fsl_dcu_drm_enable_vblank(struct drm_device *dev, int crtc)
        return 0;
 }
 
-static void fsl_dcu_drm_disable_vblank(struct drm_device *dev, int crtc)
+static void fsl_dcu_drm_disable_vblank(struct drm_device *dev,
+                                      unsigned int pipe)
 {
        struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
        unsigned int value;
@@ -192,7 +193,7 @@ static struct drm_driver fsl_dcu_drm_driver = {
        .unload                 = fsl_dcu_unload,
        .preclose               = fsl_dcu_drm_preclose,
        .irq_handler            = fsl_dcu_drm_irq,
-       .get_vblank_counter     = drm_vblank_count,
+       .get_vblank_counter     = drm_vblank_no_hw_counter,
        .enable_vblank          = fsl_dcu_drm_enable_vblank,
        .disable_vblank         = fsl_dcu_drm_disable_vblank,
        .gem_free_object        = drm_gem_cma_free_object,
index d1e300dcd544a7cad7eb115849b307a6b73a7867..51daaea40b4d882c714609d38a5ced2be8c94a0c 100644 (file)
@@ -191,14 +191,12 @@ set_failed:
 
 static void
 fsl_dcu_drm_plane_cleanup_fb(struct drm_plane *plane,
-                            struct drm_framebuffer *fb,
                             const struct drm_plane_state *new_state)
 {
 }
 
 static int
 fsl_dcu_drm_plane_prepare_fb(struct drm_plane *plane,
-                            struct drm_framebuffer *fb,
                             const struct drm_plane_state *new_state)
 {
        return 0;
index 0fafb8e2483aa074ebd5ced75024c53ba435a5f7..17cea400ae32f94b015cd3a493824806cee37a18 100644 (file)
@@ -247,7 +247,6 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
 
 #define wait_for(COND, MS) _wait_for(COND, MS, 1)
 
-#define DP_LINK_STATUS_SIZE    6
 #define DP_LINK_CHECK_TIMEOUT  (10 * 1000)
 
 #define DP_LINK_CONFIGURATION_SIZE     9
index e38057b918657de7812e18071e084ae7538db8c8..e21726ecac327792c17be5bf56ec8566bd0c2304 100644 (file)
@@ -687,15 +687,15 @@ extern void psb_irq_turn_off_dpst(struct drm_device *dev);
 extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
 extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
 extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
-extern int psb_enable_vblank(struct drm_device *dev, int crtc);
-extern void psb_disable_vblank(struct drm_device *dev, int crtc);
+extern int psb_enable_vblank(struct drm_device *dev, unsigned int pipe);
+extern void psb_disable_vblank(struct drm_device *dev, unsigned int pipe);
 void
 psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
 
 void
 psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
 
-extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
+extern u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
 
 /* framebuffer.c */
 extern int psbfb_probed(struct drm_device *dev);
index 624eb36511c5dc226e4cb0817eb1322fc3011326..78eb10902809139145ad6b2dab3d690ee5eb2b52 100644 (file)
@@ -510,7 +510,7 @@ int psb_irq_disable_dpst(struct drm_device *dev)
 /*
  * It is used to enable VBLANK interrupt
  */
-int psb_enable_vblank(struct drm_device *dev, int pipe)
+int psb_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_psb_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
@@ -549,7 +549,7 @@ int psb_enable_vblank(struct drm_device *dev, int pipe)
 /*
  * It is used to disable VBLANK interrupt
  */
-void psb_disable_vblank(struct drm_device *dev, int pipe)
+void psb_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_psb_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
@@ -622,7 +622,7 @@ void mdfld_disable_te(struct drm_device *dev, int pipe)
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
-u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
+u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 {
        uint32_t high_frame = PIPEAFRAMEHIGH;
        uint32_t low_frame = PIPEAFRAMEPIXEL;
@@ -654,7 +654,7 @@ u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
        reg_val = REG_READ(pipeconf_reg);
 
        if (!(reg_val & PIPEACONF_ENABLE)) {
-               dev_err(dev->dev, "trying to get vblank count for disabled pipe %d\n",
+               dev_err(dev->dev, "trying to get vblank count for disabled pipe %u\n",
                                                                pipe);
                goto psb_get_vblank_counter_exit;
        }
index d0b45ffa112600b65beb932f8fb1b4e5802453af..e6a81a8c9f3548214d0dfc7f7350e4b431765ab3 100644 (file)
@@ -38,9 +38,9 @@ int psb_irq_enable_dpst(struct drm_device *dev);
 int psb_irq_disable_dpst(struct drm_device *dev);
 void psb_irq_turn_on_dpst(struct drm_device *dev);
 void psb_irq_turn_off_dpst(struct drm_device *dev);
-int  psb_enable_vblank(struct drm_device *dev, int pipe);
-void psb_disable_vblank(struct drm_device *dev, int pipe);
-u32  psb_get_vblank_counter(struct drm_device *dev, int pipe);
+int  psb_enable_vblank(struct drm_device *dev, unsigned int pipe);
+void psb_disable_vblank(struct drm_device *dev, unsigned int pipe);
+u32  psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
 
 int mdfld_enable_te(struct drm_device *dev, int pipe);
 void mdfld_disable_te(struct drm_device *dev, int pipe);
index 51fa323920299ebf5dc57c868054cde77955fcd0..d9a72c96e56cea529d404a92f21dfe0475f9dd2c 100644 (file)
@@ -119,8 +119,8 @@ static void ch7006_encoder_mode_set(struct drm_encoder *encoder,
        struct ch7006_encoder_params *params = &priv->params;
        struct ch7006_state *state = &priv->state;
        uint8_t *regs = state->regs;
-       struct ch7006_mode *mode = priv->mode;
-       struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+       const struct ch7006_mode *mode = priv->mode;
+       const struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
        int start_active;
 
        ch7006_dbg(client, "\n");
@@ -226,7 +226,7 @@ static int ch7006_encoder_get_modes(struct drm_encoder *encoder,
                                    struct drm_connector *connector)
 {
        struct ch7006_priv *priv = to_ch7006_priv(encoder);
-       struct ch7006_mode *mode;
+       const struct ch7006_mode *mode;
        int n = 0;
 
        for (mode = ch7006_modes; mode->mode.clock; mode++) {
index 9b83574141a63243ba232b5c1ab0da0c1874e68f..bb5f67f10edbc0f46a2e09c2d7dfd31773c5fd35 100644 (file)
@@ -26,7 +26,7 @@
 
 #include "ch7006_priv.h"
 
-char *ch7006_tv_norm_names[] = {
+const char * const ch7006_tv_norm_names[] = {
        [TV_NORM_PAL] = "PAL",
        [TV_NORM_PAL_M] = "PAL-M",
        [TV_NORM_PAL_N] = "PAL-N",
@@ -46,7 +46,7 @@ char *ch7006_tv_norm_names[] = {
                .vtotal = 625,                                  \
                .hvirtual = 810
 
-struct ch7006_tv_norm_info ch7006_tv_norms[] = {
+const struct ch7006_tv_norm_info ch7006_tv_norms[] = {
        [TV_NORM_NTSC_M] = {
                NTSC_LIKE_TIMINGS,
                .black_level = 0.339 * fixed1,
@@ -142,7 +142,7 @@ struct ch7006_tv_norm_info ch7006_tv_norms[] = {
 
 #define PAL_LIKE (1 << TV_NORM_PAL | 1 << TV_NORM_PAL_N | 1 << TV_NORM_PAL_NC)
 
-struct ch7006_mode ch7006_modes[] = {
+const struct ch7006_mode ch7006_modes[] = {
        MODE(21000, 512, 384, 840, 500, N, N, 181.797557582, 5_4, 0x6, PAL_LIKE),
        MODE(26250, 512, 384, 840, 625, N, N, 145.438046066, 1_1, 0x1, PAL_LIKE),
        MODE(20140, 512, 384, 800, 420, N, N, 213.257083791, 5_4, 0x4, NTSC_LIKE),
@@ -171,11 +171,11 @@ struct ch7006_mode ch7006_modes[] = {
        {}
 };
 
-struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
-                                      const struct drm_display_mode *drm_mode)
+const struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
+                                            const struct drm_display_mode *drm_mode)
 {
        struct ch7006_priv *priv = to_ch7006_priv(encoder);
-       struct ch7006_mode *mode;
+       const struct ch7006_mode *mode;
 
        for (mode = ch7006_modes; mode->mode.clock; mode++) {
 
@@ -202,7 +202,7 @@ void ch7006_setup_levels(struct drm_encoder *encoder)
        struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
        struct ch7006_priv *priv = to_ch7006_priv(encoder);
        uint8_t *regs = priv->state.regs;
-       struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+       const struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
        int gain;
        int black_level;
 
@@ -233,8 +233,8 @@ void ch7006_setup_subcarrier(struct drm_encoder *encoder)
        struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
        struct ch7006_priv *priv = to_ch7006_priv(encoder);
        struct ch7006_state *state = &priv->state;
-       struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
-       struct ch7006_mode *mode = priv->mode;
+       const struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+       const struct ch7006_mode *mode = priv->mode;
        uint32_t subc_inc;
 
        subc_inc = round_fixed((mode->subc_coeff >> 8)
@@ -257,7 +257,7 @@ void ch7006_setup_pll(struct drm_encoder *encoder)
        struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
        struct ch7006_priv *priv = to_ch7006_priv(encoder);
        uint8_t *regs = priv->state.regs;
-       struct ch7006_mode *mode = priv->mode;
+       const struct ch7006_mode *mode = priv->mode;
        int n, best_n = 0;
        int m, best_m = 0;
        int freq, best_freq = 0;
@@ -328,9 +328,9 @@ void ch7006_setup_properties(struct drm_encoder *encoder)
        struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
        struct ch7006_priv *priv = to_ch7006_priv(encoder);
        struct ch7006_state *state = &priv->state;
-       struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
-       struct ch7006_mode *ch_mode = priv->mode;
-       struct drm_display_mode *mode = &ch_mode->mode;
+       const struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+       const struct ch7006_mode *ch_mode = priv->mode;
+       const struct drm_display_mode *mode = &ch_mode->mode;
        uint8_t *regs = state->regs;
        int flicker, contrast, hpos, vpos;
        uint64_t scale, aspect;
index ce577841f9319850f2b87f06f8819955ceeeb371..dc6414af5d79eec1e38a35e6b045a1e8f282f048 100644 (file)
@@ -78,7 +78,7 @@ struct ch7006_state {
 
 struct ch7006_priv {
        struct ch7006_encoder_params params;
-       struct ch7006_mode *mode;
+       const struct ch7006_mode *mode;
 
        struct ch7006_state state;
        struct ch7006_state saved_state;
@@ -106,12 +106,12 @@ extern int ch7006_debug;
 extern char *ch7006_tv_norm;
 extern int ch7006_scale;
 
-extern char *ch7006_tv_norm_names[];
-extern struct ch7006_tv_norm_info ch7006_tv_norms[];
-extern struct ch7006_mode ch7006_modes[];
+extern const char * const ch7006_tv_norm_names[];
+extern const struct ch7006_tv_norm_info ch7006_tv_norms[];
+extern const struct ch7006_mode ch7006_modes[];
 
-struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
-                                      const struct drm_display_mode *drm_mode);
+const struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
+                                            const struct drm_display_mode *drm_mode);
 
 void ch7006_setup_levels(struct drm_encoder *encoder);
 void ch7006_setup_subcarrier(struct drm_encoder *encoder);
index 424228be79ae5b2aa1557ca07331e4e49e665ef8..896b6aaf8c4d0e376506913914961d1639785321 100644 (file)
@@ -23,7 +23,6 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
-#include <drm/drm_encoder_slave.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_of.h>
 #include <drm/i2c/tda998x.h>
@@ -34,9 +33,8 @@ struct tda998x_priv {
        struct i2c_client *cec;
        struct i2c_client *hdmi;
        struct mutex mutex;
-       struct delayed_work dwork;
-       uint16_t rev;
-       uint8_t current_page;
+       u16 rev;
+       u8 current_page;
        int dpms;
        bool is_hdmi_sink;
        u8 vip_cntrl_0;
@@ -46,10 +44,21 @@ struct tda998x_priv {
 
        wait_queue_head_t wq_edid;
        volatile int wq_edid_wait;
-       struct drm_encoder *encoder;
+
+       struct work_struct detect_work;
+       struct timer_list edid_delay_timer;
+       wait_queue_head_t edid_delay_waitq;
+       bool edid_delay_active;
+
+       struct drm_encoder encoder;
+       struct drm_connector connector;
 };
 
-#define to_tda998x_priv(x)  ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv)
+#define conn_to_tda998x_priv(x) \
+       container_of(x, struct tda998x_priv, connector)
+
+#define enc_to_tda998x_priv(x) \
+       container_of(x, struct tda998x_priv, encoder)
 
 /* The TDA9988 series of devices use a paged register scheme.. to simplify
  * things we encode the page # in upper bits of the register #.  To read/
@@ -326,6 +335,8 @@ struct tda998x_priv {
 # define CEC_FRO_IM_CLK_CTRL_FRO_DIV   (1 << 0)
 #define REG_CEC_RXSHPDINTENA     0xfc                /* read/write */
 #define REG_CEC_RXSHPDINT        0xfd                /* read */
+# define CEC_RXSHPDINT_RXSENS     BIT(0)
+# define CEC_RXSHPDINT_HPD        BIT(1)
 #define REG_CEC_RXSHPDLEV         0xfe                /* read */
 # define CEC_RXSHPDLEV_RXSENS     (1 << 0)
 # define CEC_RXSHPDLEV_HPD        (1 << 1)
@@ -345,10 +356,10 @@ struct tda998x_priv {
 #define TDA19988                  0x0301
 
 static void
-cec_write(struct tda998x_priv *priv, uint16_t addr, uint8_t val)
+cec_write(struct tda998x_priv *priv, u16 addr, u8 val)
 {
        struct i2c_client *client = priv->cec;
-       uint8_t buf[] = {addr, val};
+       u8 buf[] = {addr, val};
        int ret;
 
        ret = i2c_master_send(client, buf, sizeof(buf));
@@ -356,11 +367,11 @@ cec_write(struct tda998x_priv *priv, uint16_t addr, uint8_t val)
                dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr);
 }
 
-static uint8_t
-cec_read(struct tda998x_priv *priv, uint8_t addr)
+static u8
+cec_read(struct tda998x_priv *priv, u8 addr)
 {
        struct i2c_client *client = priv->cec;
-       uint8_t val;
+       u8 val;
        int ret;
 
        ret = i2c_master_send(client, &addr, sizeof(addr));
@@ -379,11 +390,11 @@ fail:
 }
 
 static int
-set_page(struct tda998x_priv *priv, uint16_t reg)
+set_page(struct tda998x_priv *priv, u16 reg)
 {
        if (REG2PAGE(reg) != priv->current_page) {
                struct i2c_client *client = priv->hdmi;
-               uint8_t buf[] = {
+               u8 buf[] = {
                                REG_CURPAGE, REG2PAGE(reg)
                };
                int ret = i2c_master_send(client, buf, sizeof(buf));
@@ -399,10 +410,10 @@ set_page(struct tda998x_priv *priv, uint16_t reg)
 }
 
 static int
-reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
+reg_read_range(struct tda998x_priv *priv, u16 reg, char *buf, int cnt)
 {
        struct i2c_client *client = priv->hdmi;
-       uint8_t addr = REG2ADDR(reg);
+       u8 addr = REG2ADDR(reg);
        int ret;
 
        mutex_lock(&priv->mutex);
@@ -428,10 +439,10 @@ out:
 }
 
 static void
-reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
+reg_write_range(struct tda998x_priv *priv, u16 reg, u8 *p, int cnt)
 {
        struct i2c_client *client = priv->hdmi;
-       uint8_t buf[cnt+1];
+       u8 buf[cnt+1];
        int ret;
 
        buf[0] = REG2ADDR(reg);
@@ -450,9 +461,9 @@ out:
 }
 
 static int
-reg_read(struct tda998x_priv *priv, uint16_t reg)
+reg_read(struct tda998x_priv *priv, u16 reg)
 {
-       uint8_t val = 0;
+       u8 val = 0;
        int ret;
 
        ret = reg_read_range(priv, reg, &val, sizeof(val));
@@ -462,10 +473,10 @@ reg_read(struct tda998x_priv *priv, uint16_t reg)
 }
 
 static void
-reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
+reg_write(struct tda998x_priv *priv, u16 reg, u8 val)
 {
        struct i2c_client *client = priv->hdmi;
-       uint8_t buf[] = {REG2ADDR(reg), val};
+       u8 buf[] = {REG2ADDR(reg), val};
        int ret;
 
        mutex_lock(&priv->mutex);
@@ -481,10 +492,10 @@ out:
 }
 
 static void
-reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
+reg_write16(struct tda998x_priv *priv, u16 reg, u16 val)
 {
        struct i2c_client *client = priv->hdmi;
-       uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
+       u8 buf[] = {REG2ADDR(reg), val >> 8, val};
        int ret;
 
        mutex_lock(&priv->mutex);
@@ -500,7 +511,7 @@ out:
 }
 
 static void
-reg_set(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
+reg_set(struct tda998x_priv *priv, u16 reg, u8 val)
 {
        int old_val;
 
@@ -510,7 +521,7 @@ reg_set(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
 }
 
 static void
-reg_clear(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
+reg_clear(struct tda998x_priv *priv, u16 reg, u8 val)
 {
        int old_val;
 
@@ -551,15 +562,50 @@ tda998x_reset(struct tda998x_priv *priv)
        reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
 }
 
-/* handle HDMI connect/disconnect */
-static void tda998x_hpd(struct work_struct *work)
+/*
+ * The TDA998x has a problem when trying to read the EDID close to a
+ * HPD assertion: it needs a delay of 100ms to avoid timing out while
+ * trying to read EDID data.
+ *
+ * However, tda998x_encoder_get_modes() may be called at any moment
+ * after tda998x_connector_detect() indicates that we are connected, so
+ * we need to delay probing modes in tda998x_encoder_get_modes() after
+ * we have seen a HPD inactive->active transition.  This code implements
+ * that delay.
+ */
+static void tda998x_edid_delay_done(unsigned long data)
+{
+       struct tda998x_priv *priv = (struct tda998x_priv *)data;
+
+       priv->edid_delay_active = false;
+       wake_up(&priv->edid_delay_waitq);
+       schedule_work(&priv->detect_work);
+}
+
+static void tda998x_edid_delay_start(struct tda998x_priv *priv)
+{
+       priv->edid_delay_active = true;
+       mod_timer(&priv->edid_delay_timer, jiffies + HZ/10);
+}
+
+static int tda998x_edid_delay_wait(struct tda998x_priv *priv)
+{
+       return wait_event_killable(priv->edid_delay_waitq, !priv->edid_delay_active);
+}
+
+/*
+ * We need to run the KMS hotplug event helper outside of our threaded
+ * interrupt routine as this can call back into our get_modes method,
+ * which will want to make use of interrupts.
+ */
+static void tda998x_detect_work(struct work_struct *work)
 {
-       struct delayed_work *dwork = to_delayed_work(work);
        struct tda998x_priv *priv =
-                       container_of(dwork, struct tda998x_priv, dwork);
+               container_of(work, struct tda998x_priv, detect_work);
+       struct drm_device *dev = priv->encoder.dev;
 
-       if (priv->encoder && priv->encoder->dev)
-               drm_kms_helper_hotplug_event(priv->encoder->dev);
+       if (dev)
+               drm_kms_helper_hotplug_event(dev);
 }
 
 /*
@@ -569,9 +615,8 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
 {
        struct tda998x_priv *priv = data;
        u8 sta, cec, lvl, flag0, flag1, flag2;
+       bool handled = false;
 
-       if (!priv)
-               return IRQ_HANDLED;
        sta = cec_read(priv, REG_CEC_INTSTATUS);
        cec = cec_read(priv, REG_CEC_RXSHPDINT);
        lvl = cec_read(priv, REG_CEC_RXSHPDLEV);
@@ -581,75 +626,76 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
        DRM_DEBUG_DRIVER(
                "tda irq sta %02x cec %02x lvl %02x f0 %02x f1 %02x f2 %02x\n",
                sta, cec, lvl, flag0, flag1, flag2);
+
+       if (cec & CEC_RXSHPDINT_HPD) {
+               if (lvl & CEC_RXSHPDLEV_HPD)
+                       tda998x_edid_delay_start(priv);
+               else
+                       schedule_work(&priv->detect_work);
+
+               handled = true;
+       }
+
        if ((flag2 & INT_FLAGS_2_EDID_BLK_RD) && priv->wq_edid_wait) {
                priv->wq_edid_wait = 0;
                wake_up(&priv->wq_edid);
-       } else if (cec != 0) {                  /* HPD change */
-               schedule_delayed_work(&priv->dwork, HZ/10);
+               handled = true;
        }
-       return IRQ_HANDLED;
-}
 
-static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes)
-{
-       int sum = 0;
-
-       while (bytes--)
-               sum -= *buf++;
-       return sum;
+       return IRQ_RETVAL(handled);
 }
 
-#define HB(x) (x)
-#define PB(x) (HB(2) + 1 + (x))
-
 static void
-tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr,
-                uint8_t *buf, size_t size)
+tda998x_write_if(struct tda998x_priv *priv, u8 bit, u16 addr,
+                union hdmi_infoframe *frame)
 {
+       u8 buf[32];
+       ssize_t len;
+
+       len = hdmi_infoframe_pack(frame, buf, sizeof(buf));
+       if (len < 0) {
+               dev_err(&priv->hdmi->dev,
+                       "hdmi_infoframe_pack() type=0x%02x failed: %zd\n",
+                       frame->any.type, len);
+               return;
+       }
+
        reg_clear(priv, REG_DIP_IF_FLAGS, bit);
-       reg_write_range(priv, addr, buf, size);
+       reg_write_range(priv, addr, buf, len);
        reg_set(priv, REG_DIP_IF_FLAGS, bit);
 }
 
 static void
 tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
 {
-       u8 buf[PB(HDMI_AUDIO_INFOFRAME_SIZE) + 1];
+       union hdmi_infoframe frame;
+
+       hdmi_audio_infoframe_init(&frame.audio);
 
-       memset(buf, 0, sizeof(buf));
-       buf[HB(0)] = HDMI_INFOFRAME_TYPE_AUDIO;
-       buf[HB(1)] = 0x01;
-       buf[HB(2)] = HDMI_AUDIO_INFOFRAME_SIZE;
-       buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
-       buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
-       buf[PB(4)] = p->audio_frame[4];
-       buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
+       frame.audio.channels = p->audio_frame[1] & 0x07;
+       frame.audio.channel_allocation = p->audio_frame[4];
+       frame.audio.level_shift_value = (p->audio_frame[5] & 0x78) >> 3;
+       frame.audio.downmix_inhibit = (p->audio_frame[5] & 0x80) >> 7;
 
-       buf[PB(0)] = tda998x_cksum(buf, sizeof(buf));
+       /*
+        * L-PCM and IEC61937 compressed audio shall always set sample
+        * frequency to "refer to stream".  For others, see the HDMI
+        * specification.
+        */
+       frame.audio.sample_frequency = (p->audio_frame[2] & 0x1c) >> 2;
 
-       tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
-                        sizeof(buf));
+       tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, &frame);
 }
 
 static void
 tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode)
 {
-       struct hdmi_avi_infoframe frame;
-       u8 buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
-       ssize_t len;
+       union hdmi_infoframe frame;
 
-       drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+       drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode);
+       frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
 
-       frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
-
-       len = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
-       if (len < 0) {
-               dev_err(&priv->hdmi->dev,
-                       "hdmi_avi_infoframe_pack() failed: %zd\n", len);
-               return;
-       }
-
-       tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf, len);
+       tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, &frame);
 }
 
 static void tda998x_audio_mute(struct tda998x_priv *priv, bool on)
@@ -667,8 +713,8 @@ static void
 tda998x_configure_audio(struct tda998x_priv *priv,
                struct drm_display_mode *mode, struct tda998x_encoder_params *p)
 {
-       uint8_t buf[6], clksel_aip, clksel_fs, cts_n, adiv;
-       uint32_t n;
+       u8 buf[6], clksel_aip, clksel_fs, cts_n, adiv;
+       u32 n;
 
        /* Enable audio ports */
        reg_write(priv, REG_ENA_AP, p->audio_cfg);
@@ -776,8 +822,10 @@ static void tda998x_encoder_set_config(struct tda998x_priv *priv,
        priv->params = *p;
 }
 
-static void tda998x_encoder_dpms(struct tda998x_priv *priv, int mode)
+static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
 {
+       struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
+
        /* we only care about on or off: */
        if (mode != DRM_MODE_DPMS_ON)
                mode = DRM_MODE_DPMS_OFF;
@@ -827,8 +875,8 @@ tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
        return true;
 }
 
-static int tda998x_encoder_mode_valid(struct tda998x_priv *priv,
-                                     struct drm_display_mode *mode)
+static int tda998x_connector_mode_valid(struct drm_connector *connector,
+                                       struct drm_display_mode *mode)
 {
        if (mode->clock > 150000)
                return MODE_CLOCK_HIGH;
@@ -840,18 +888,19 @@ static int tda998x_encoder_mode_valid(struct tda998x_priv *priv,
 }
 
 static void
-tda998x_encoder_mode_set(struct tda998x_priv *priv,
+tda998x_encoder_mode_set(struct drm_encoder *encoder,
                         struct drm_display_mode *mode,
                         struct drm_display_mode *adjusted_mode)
 {
-       uint16_t ref_pix, ref_line, n_pix, n_line;
-       uint16_t hs_pix_s, hs_pix_e;
-       uint16_t vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
-       uint16_t vs2_pix_s, vs2_pix_e, vs2_line_s, vs2_line_e;
-       uint16_t vwin1_line_s, vwin1_line_e;
-       uint16_t vwin2_line_s, vwin2_line_e;
-       uint16_t de_pix_s, de_pix_e;
-       uint8_t reg, div, rep;
+       struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
+       u16 ref_pix, ref_line, n_pix, n_line;
+       u16 hs_pix_s, hs_pix_e;
+       u16 vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
+       u16 vs2_pix_s, vs2_pix_e, vs2_line_s, vs2_line_e;
+       u16 vwin1_line_s, vwin1_line_e;
+       u16 vwin2_line_s, vwin2_line_e;
+       u16 de_pix_s, de_pix_e;
+       u8 reg, div, rep;
 
        /*
         * Internally TDA998x is using ITU-R BT.656 style sync but
@@ -1031,9 +1080,10 @@ tda998x_encoder_mode_set(struct tda998x_priv *priv,
 }
 
 static enum drm_connector_status
-tda998x_encoder_detect(struct tda998x_priv *priv)
+tda998x_connector_detect(struct drm_connector *connector, bool force)
 {
-       uint8_t val = cec_read(priv, REG_CEC_RXSHPDLEV);
+       struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
+       u8 val = cec_read(priv, REG_CEC_RXSHPDLEV);
 
        return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
                        connector_status_disconnected;
@@ -1042,7 +1092,7 @@ tda998x_encoder_detect(struct tda998x_priv *priv)
 static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
 {
        struct tda998x_priv *priv = data;
-       uint8_t offset, segptr;
+       u8 offset, segptr;
        int ret, i;
 
        offset = (blk & 1) ? 128 : 0;
@@ -1095,13 +1145,20 @@ static int read_edid_block(void *data, u8 *buf, unsigned int blk, size_t length)
        return 0;
 }
 
-static int
-tda998x_encoder_get_modes(struct tda998x_priv *priv,
-                         struct drm_connector *connector)
+static int tda998x_connector_get_modes(struct drm_connector *connector)
 {
+       struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
        struct edid *edid;
        int n;
 
+       /*
+        * If we get killed while waiting for the HPD timeout, return
+        * no modes found: we are not in a restartable path, so we
+        * can't handle signals gracefully.
+        */
+       if (tda998x_edid_delay_wait(priv))
+               return 0;
+
        if (priv->rev == TDA19988)
                reg_clear(priv, REG_TX4, TX4_PD_RAM);
 
@@ -1133,101 +1190,21 @@ static void tda998x_encoder_set_polling(struct tda998x_priv *priv,
                        DRM_CONNECTOR_POLL_DISCONNECT;
 }
 
-static int
-tda998x_encoder_set_property(struct drm_encoder *encoder,
-                           struct drm_connector *connector,
-                           struct drm_property *property,
-                           uint64_t val)
-{
-       DBG("");
-       return 0;
-}
-
 static void tda998x_destroy(struct tda998x_priv *priv)
 {
        /* disable all IRQs and free the IRQ handler */
        cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
        reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
-       if (priv->hdmi->irq) {
-               free_irq(priv->hdmi->irq, priv);
-               cancel_delayed_work_sync(&priv->dwork);
-       }
-
-       i2c_unregister_device(priv->cec);
-}
-
-/* Slave encoder support */
-
-static void
-tda998x_encoder_slave_set_config(struct drm_encoder *encoder, void *params)
-{
-       tda998x_encoder_set_config(to_tda998x_priv(encoder), params);
-}
 
-static void tda998x_encoder_slave_destroy(struct drm_encoder *encoder)
-{
-       struct tda998x_priv *priv = to_tda998x_priv(encoder);
-
-       tda998x_destroy(priv);
-       drm_i2c_encoder_destroy(encoder);
-       kfree(priv);
-}
-
-static void tda998x_encoder_slave_dpms(struct drm_encoder *encoder, int mode)
-{
-       tda998x_encoder_dpms(to_tda998x_priv(encoder), mode);
-}
-
-static int tda998x_encoder_slave_mode_valid(struct drm_encoder *encoder,
-                                           struct drm_display_mode *mode)
-{
-       return tda998x_encoder_mode_valid(to_tda998x_priv(encoder), mode);
-}
+       if (priv->hdmi->irq)
+               free_irq(priv->hdmi->irq, priv);
 
-static void
-tda998x_encoder_slave_mode_set(struct drm_encoder *encoder,
-                              struct drm_display_mode *mode,
-                              struct drm_display_mode *adjusted_mode)
-{
-       tda998x_encoder_mode_set(to_tda998x_priv(encoder), mode, adjusted_mode);
-}
+       del_timer_sync(&priv->edid_delay_timer);
+       cancel_work_sync(&priv->detect_work);
 
-static enum drm_connector_status
-tda998x_encoder_slave_detect(struct drm_encoder *encoder,
-                            struct drm_connector *connector)
-{
-       return tda998x_encoder_detect(to_tda998x_priv(encoder));
-}
-
-static int tda998x_encoder_slave_get_modes(struct drm_encoder *encoder,
-                                          struct drm_connector *connector)
-{
-       return tda998x_encoder_get_modes(to_tda998x_priv(encoder), connector);
-}
-
-static int
-tda998x_encoder_slave_create_resources(struct drm_encoder *encoder,
-                                      struct drm_connector *connector)
-{
-       tda998x_encoder_set_polling(to_tda998x_priv(encoder), connector);
-       return 0;
+       i2c_unregister_device(priv->cec);
 }
 
-static struct drm_encoder_slave_funcs tda998x_encoder_slave_funcs = {
-       .set_config = tda998x_encoder_slave_set_config,
-       .destroy = tda998x_encoder_slave_destroy,
-       .dpms = tda998x_encoder_slave_dpms,
-       .save = tda998x_encoder_save,
-       .restore = tda998x_encoder_restore,
-       .mode_fixup = tda998x_encoder_mode_fixup,
-       .mode_valid = tda998x_encoder_slave_mode_valid,
-       .mode_set = tda998x_encoder_slave_mode_set,
-       .detect = tda998x_encoder_slave_detect,
-       .get_modes = tda998x_encoder_slave_get_modes,
-       .create_resources = tda998x_encoder_slave_create_resources,
-       .set_property = tda998x_encoder_set_property,
-};
-
 /* I2C driver functions */
 
 static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
@@ -1252,6 +1229,10 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
        priv->dpms = DRM_MODE_DPMS_OFF;
 
        mutex_init(&priv->mutex);       /* protect the page access */
+       init_waitqueue_head(&priv->edid_delay_waitq);
+       setup_timer(&priv->edid_delay_timer, tda998x_edid_delay_done,
+                   (unsigned long)priv);
+       INIT_WORK(&priv->detect_work, tda998x_detect_work);
 
        /* wake up the device: */
        cec_write(priv, REG_CEC_ENAMODS,
@@ -1310,7 +1291,6 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 
                /* init read EDID waitqueue and HDP work */
                init_waitqueue_head(&priv->wq_edid);
-               INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);
 
                /* clear pending interrupts */
                reg_read(priv, REG_INT_FLAGS_0);
@@ -1359,84 +1339,31 @@ fail:
        return -ENXIO;
 }
 
-static int tda998x_encoder_init(struct i2c_client *client,
-                               struct drm_device *dev,
-                               struct drm_encoder_slave *encoder_slave)
-{
-       struct tda998x_priv *priv;
-       int ret;
-
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
-
-       priv->encoder = &encoder_slave->base;
-
-       ret = tda998x_create(client, priv);
-       if (ret) {
-               kfree(priv);
-               return ret;
-       }
-
-       encoder_slave->slave_priv = priv;
-       encoder_slave->slave_funcs = &tda998x_encoder_slave_funcs;
-
-       return 0;
-}
-
-struct tda998x_priv2 {
-       struct tda998x_priv base;
-       struct drm_encoder encoder;
-       struct drm_connector connector;
-};
-
-#define conn_to_tda998x_priv2(x) \
-       container_of(x, struct tda998x_priv2, connector);
-
-#define enc_to_tda998x_priv2(x) \
-       container_of(x, struct tda998x_priv2, encoder);
-
-static void tda998x_encoder2_dpms(struct drm_encoder *encoder, int mode)
-{
-       struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder);
-
-       tda998x_encoder_dpms(&priv->base, mode);
-}
-
 static void tda998x_encoder_prepare(struct drm_encoder *encoder)
 {
-       tda998x_encoder2_dpms(encoder, DRM_MODE_DPMS_OFF);
+       tda998x_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 }
 
 static void tda998x_encoder_commit(struct drm_encoder *encoder)
 {
-       tda998x_encoder2_dpms(encoder, DRM_MODE_DPMS_ON);
-}
-
-static void tda998x_encoder2_mode_set(struct drm_encoder *encoder,
-                                     struct drm_display_mode *mode,
-                                     struct drm_display_mode *adjusted_mode)
-{
-       struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder);
-
-       tda998x_encoder_mode_set(&priv->base, mode, adjusted_mode);
+       tda998x_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
 }
 
 static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = {
-       .dpms = tda998x_encoder2_dpms,
+       .dpms = tda998x_encoder_dpms,
        .save = tda998x_encoder_save,
        .restore = tda998x_encoder_restore,
        .mode_fixup = tda998x_encoder_mode_fixup,
        .prepare = tda998x_encoder_prepare,
        .commit = tda998x_encoder_commit,
-       .mode_set = tda998x_encoder2_mode_set,
+       .mode_set = tda998x_encoder_mode_set,
 };
 
 static void tda998x_encoder_destroy(struct drm_encoder *encoder)
 {
-       struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder);
+       struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
 
-       tda998x_destroy(&priv->base);
+       tda998x_destroy(priv);
        drm_encoder_cleanup(encoder);
 }
 
@@ -1444,25 +1371,10 @@ static const struct drm_encoder_funcs tda998x_encoder_funcs = {
        .destroy = tda998x_encoder_destroy,
 };
 
-static int tda998x_connector_get_modes(struct drm_connector *connector)
-{
-       struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
-
-       return tda998x_encoder_get_modes(&priv->base, connector);
-}
-
-static int tda998x_connector_mode_valid(struct drm_connector *connector,
-                                       struct drm_display_mode *mode)
-{
-       struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
-
-       return tda998x_encoder_mode_valid(&priv->base, mode);
-}
-
 static struct drm_encoder *
 tda998x_connector_best_encoder(struct drm_connector *connector)
 {
-       struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
+       struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
 
        return &priv->encoder;
 }
@@ -1474,14 +1386,6 @@ const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
        .best_encoder = tda998x_connector_best_encoder,
 };
 
-static enum drm_connector_status
-tda998x_connector_detect(struct drm_connector *connector, bool force)
-{
-       struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
-
-       return tda998x_encoder_detect(&priv->base);
-}
-
 static void tda998x_connector_destroy(struct drm_connector *connector)
 {
        drm_connector_unregister(connector);
@@ -1500,8 +1404,8 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
        struct tda998x_encoder_params *params = dev->platform_data;
        struct i2c_client *client = to_i2c_client(dev);
        struct drm_device *drm = data;
-       struct tda998x_priv2 *priv;
-       uint32_t crtcs = 0;
+       struct tda998x_priv *priv;
+       u32 crtcs = 0;
        int ret;
 
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -1519,18 +1423,17 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
                crtcs = 1 << 0;
        }
 
-       priv->base.encoder = &priv->encoder;
        priv->connector.interlace_allowed = 1;
        priv->encoder.possible_crtcs = crtcs;
 
-       ret = tda998x_create(client, &priv->base);
+       ret = tda998x_create(client, priv);
        if (ret)
                return ret;
 
        if (!dev->of_node && params)
-               tda998x_encoder_set_config(&priv->base, params);
+               tda998x_encoder_set_config(priv, params);
 
-       tda998x_encoder_set_polling(&priv->base, &priv->connector);
+       tda998x_encoder_set_polling(priv, &priv->connector);
 
        drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs);
        ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
@@ -1560,18 +1463,18 @@ err_sysfs:
 err_connector:
        drm_encoder_cleanup(&priv->encoder);
 err_encoder:
-       tda998x_destroy(&priv->base);
+       tda998x_destroy(priv);
        return ret;
 }
 
 static void tda998x_unbind(struct device *dev, struct device *master,
                           void *data)
 {
-       struct tda998x_priv2 *priv = dev_get_drvdata(dev);
+       struct tda998x_priv *priv = dev_get_drvdata(dev);
 
        drm_connector_cleanup(&priv->connector);
        drm_encoder_cleanup(&priv->encoder);
-       tda998x_destroy(&priv->base);
+       tda998x_destroy(priv);
 }
 
 static const struct component_ops tda998x_ops = {
@@ -1605,38 +1508,18 @@ static struct i2c_device_id tda998x_ids[] = {
 };
 MODULE_DEVICE_TABLE(i2c, tda998x_ids);
 
-static struct drm_i2c_encoder_driver tda998x_driver = {
-       .i2c_driver = {
-               .probe = tda998x_probe,
-               .remove = tda998x_remove,
-               .driver = {
-                       .name = "tda998x",
-                       .of_match_table = of_match_ptr(tda998x_dt_ids),
-               },
-               .id_table = tda998x_ids,
+static struct i2c_driver tda998x_driver = {
+       .probe = tda998x_probe,
+       .remove = tda998x_remove,
+       .driver = {
+               .name = "tda998x",
+               .of_match_table = of_match_ptr(tda998x_dt_ids),
        },
-       .encoder_init = tda998x_encoder_init,
+       .id_table = tda998x_ids,
 };
 
-/* Module initialization */
-
-static int __init
-tda998x_init(void)
-{
-       DBG("");
-       return drm_i2c_encoder_register(THIS_MODULE, &tda998x_driver);
-}
-
-static void __exit
-tda998x_exit(void)
-{
-       DBG("");
-       drm_i2c_encoder_unregister(&tda998x_driver);
-}
+module_i2c_driver(tda998x_driver);
 
 MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
 MODULE_DESCRIPTION("NXP Semiconductors TDA998X HDMI Encoder");
 MODULE_LICENSE("GPL");
-
-module_init(tda998x_init);
-module_exit(tda998x_exit);
index 998b4643109f6cf46021b40fe88f606303c79893..44d290ae1999113ef6e7a915c4289a1d00cf753c 100644 (file)
@@ -40,6 +40,10 @@ i915-y += i915_cmd_parser.o \
          intel_ringbuffer.o \
          intel_uncore.o
 
+# general-purpose microcontroller (GuC) support
+i915-y += intel_guc_loader.o \
+         i915_guc_submission.o
+
 # autogenerated null render state
 i915-y += intel_renderstate_gen6.o \
          intel_renderstate_gen7.o \
index 237ff6884a2227bc9b7520ed4bcaabe75d924f74..09932cab1a3f9ba5d01d4abaee8a7d9053eed768 100644 (file)
@@ -94,7 +94,7 @@
 #define CMD(op, opm, f, lm, fl, ...)                           \
        {                                                       \
                .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0),     \
-               .cmd = { (op), (opm) },                         \
+               .cmd = { (op), (opm) },                         \
                .length = { (lm) },                             \
                __VA_ARGS__                                     \
        }
@@ -124,14 +124,14 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
        CMD(  MI_STORE_DWORD_INDEX,             SMI,   !F,  0xFF,   R  ),
        CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
              .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 }    ),
-       CMD(  MI_STORE_REGISTER_MEM(1),         SMI,   !F,  0xFF,   W | B,
+       CMD(  MI_STORE_REGISTER_MEM,            SMI,    F,  3,     W | B,
              .reg = { .offset = 1, .mask = 0x007FFFFC },
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
-       CMD(  MI_LOAD_REGISTER_MEM(1),             SMI,   !F,  0xFF,   W | B,
+       CMD(  MI_LOAD_REGISTER_MEM,             SMI,    F,  3,     W | B,
              .reg = { .offset = 1, .mask = 0x007FFFFC },
              .bits = {{
                        .offset = 0,
@@ -1021,7 +1021,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
                         * only MI_LOAD_REGISTER_IMM commands.
                         */
                        if (reg_addr == OACONTROL) {
-                               if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
+                               if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
                                        DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
                                        return false;
                                }
@@ -1035,7 +1035,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
                         * allowed mask/value pair given in the whitelist entry.
                         */
                        if (reg->mask) {
-                               if (desc->cmd.value == MI_LOAD_REGISTER_MEM(1)) {
+                               if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
                                        DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
                                                         reg_addr);
                                        return false;
@@ -1213,6 +1213,7 @@ int i915_cmd_parser_get_version(void)
         * 2. Allow access to the MI_PREDICATE_SRC0 and
         *    MI_PREDICATE_SRC1 registers.
         * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
+        * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
         */
-       return 3;
+       return 4;
 }
index e3ec9049081fd89a774f055c270da8a4524cf09c..84c7b6b294ee5318128a7ffd0423dca09c09b6dd 100644 (file)
@@ -46,11 +46,6 @@ enum {
        PINNED_LIST,
 };
 
-static const char *yesno(int v)
-{
-       return v ? "yes" : "no";
-}
-
 /* As the drm_debugfs_init() routines are called before dev->dev_private is
  * allocated we need to hook into the minor for release. */
 static int
@@ -957,7 +952,6 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
        if (ret)
                return ret;
 
-       seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
@@ -1387,17 +1381,16 @@ static int ironlake_drpc_info(struct seq_file *m)
        intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
-       seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
-                  "yes" : "no");
+       seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
        seq_printf(m, "Boost freq: %d\n",
                   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
                   MEMMODE_BOOST_FREQ_SHIFT);
        seq_printf(m, "HW control enabled: %s\n",
-                  rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
+                  yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
        seq_printf(m, "SW control enabled: %s\n",
-                  rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
+                  yesno(rgvmodectl & MEMMODE_SWMODE_EN));
        seq_printf(m, "Gated voltage change: %s\n",
-                  rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
+                  yesno(rgvmodectl & MEMMODE_RCLK_GATE));
        seq_printf(m, "Starting frequency: P%d\n",
                   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
        seq_printf(m, "Max P-state: P%d\n",
@@ -1406,7 +1399,7 @@ static int ironlake_drpc_info(struct seq_file *m)
        seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
        seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
        seq_printf(m, "Render standby enabled: %s\n",
-                  (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
+                  yesno(!(rstdbyctl & RCX_SW_EXIT)));
        seq_puts(m, "Current RS state: ");
        switch (rstdbyctl & RSX_STATUS_MASK) {
        case RSX_STATUS_ON:
@@ -1995,7 +1988,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
                return;
        }
 
-       page = i915_gem_object_get_page(ctx_obj, 1);
+       page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        if (!WARN_ON(page == NULL)) {
                reg_state = kmap_atomic(page);
 
@@ -2075,8 +2068,8 @@ static int i915_execlists(struct seq_file *m, void *data)
 
                seq_printf(m, "%s\n", ring->name);
 
-               status = I915_READ(RING_EXECLIST_STATUS(ring));
-               ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4);
+               status = I915_READ(RING_EXECLIST_STATUS_LO(ring));
+               ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring));
                seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
                           status, ctx_id);
 
@@ -2091,8 +2084,8 @@ static int i915_execlists(struct seq_file *m, void *data)
                           read_pointer, write_pointer);
 
                for (i = 0; i < 6; i++) {
-                       status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i);
-                       ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4);
+                       status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
+                       ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
 
                        seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
                                   i, status, ctx_id);
@@ -2250,7 +2243,6 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
-       struct drm_file *file;
        int i;
 
        if (INTEL_INFO(dev)->gen == 6)
@@ -2273,13 +2265,6 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
                ppgtt->debug_dump(ppgtt, m);
        }
 
-       list_for_each_entry_reverse(file, &dev->filelist, lhead) {
-               struct drm_i915_file_private *file_priv = file->driver_priv;
-
-               seq_printf(m, "proc: %s\n",
-                          get_pid_task(file->pid, PIDTYPE_PID)->comm);
-               idr_for_each(&file_priv->context_idr, per_file_ctx, m);
-       }
        seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
 }
 
@@ -2288,6 +2273,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_file *file;
 
        int ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
@@ -2299,6 +2285,19 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
        else if (INTEL_INFO(dev)->gen >= 6)
                gen6_ppgtt_info(m, dev);
 
+       list_for_each_entry_reverse(file, &dev->filelist, lhead) {
+               struct drm_i915_file_private *file_priv = file->driver_priv;
+               struct task_struct *task;
+
+               task = get_pid_task(file->pid, PIDTYPE_PID);
+               if (!task)
+                       return -ESRCH;
+               seq_printf(m, "\nproc: %s\n", task->comm);
+               put_task_struct(task);
+               idr_for_each(&file_priv->context_idr, per_file_ctx,
+                            (void *)(unsigned long)m);
+       }
+
        intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
@@ -2372,6 +2371,147 @@ static int i915_llc(struct seq_file *m, void *data)
        return 0;
 }
 
+static int i915_guc_load_status_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = m->private;
+       struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
+       struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+       u32 tmp, i;
+
+       if (!HAS_GUC_UCODE(dev_priv->dev))
+               return 0;
+
+       seq_printf(m, "GuC firmware status:\n");
+       seq_printf(m, "\tpath: %s\n",
+               guc_fw->guc_fw_path);
+       seq_printf(m, "\tfetch: %s\n",
+               intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
+       seq_printf(m, "\tload: %s\n",
+               intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+       seq_printf(m, "\tversion wanted: %d.%d\n",
+               guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
+       seq_printf(m, "\tversion found: %d.%d\n",
+               guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
+
+       tmp = I915_READ(GUC_STATUS);
+
+       seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
+       seq_printf(m, "\tBootrom status = 0x%x\n",
+               (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
+       seq_printf(m, "\tuKernel status = 0x%x\n",
+               (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
+       seq_printf(m, "\tMIA Core status = 0x%x\n",
+               (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
+       seq_puts(m, "\nScratch registers:\n");
+       for (i = 0; i < 16; i++)
+               seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
+
+       return 0;
+}
+
+static void i915_guc_client_info(struct seq_file *m,
+                                struct drm_i915_private *dev_priv,
+                                struct i915_guc_client *client)
+{
+       struct intel_engine_cs *ring;
+       uint64_t tot = 0;
+       uint32_t i;
+
+       seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
+               client->priority, client->ctx_index, client->proc_desc_offset);
+       seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
+               client->doorbell_id, client->doorbell_offset, client->cookie);
+       seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
+               client->wq_size, client->wq_offset, client->wq_tail);
+
+       seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
+       seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
+       seq_printf(m, "\tLast submission result: %d\n", client->retcode);
+
+       for_each_ring(ring, dev_priv, i) {
+               seq_printf(m, "\tSubmissions: %llu %s\n",
+                               client->submissions[i],
+                               ring->name);
+               tot += client->submissions[i];
+       }
+       seq_printf(m, "\tTotal: %llu\n", tot);
+}
+
+static int i915_guc_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_guc guc;
+       struct i915_guc_client client = {};
+       struct intel_engine_cs *ring;
+       enum intel_ring_id i;
+       u64 total = 0;
+
+       if (!HAS_GUC_SCHED(dev_priv->dev))
+               return 0;
+
+       /* Take a local copy of the GuC data, so we can dump it at leisure */
+       spin_lock(&dev_priv->guc.host2guc_lock);
+       guc = dev_priv->guc;
+       if (guc.execbuf_client) {
+               spin_lock(&guc.execbuf_client->wq_lock);
+               client = *guc.execbuf_client;
+               spin_unlock(&guc.execbuf_client->wq_lock);
+       }
+       spin_unlock(&dev_priv->guc.host2guc_lock);
+
+       seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
+       seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
+       seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
+       seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
+       seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
+
+       seq_printf(m, "\nGuC submissions:\n");
+       for_each_ring(ring, dev_priv, i) {
+               seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n",
+                       ring->name, guc.submissions[i],
+                       guc.last_seqno[i], guc.last_seqno[i]);
+               total += guc.submissions[i];
+       }
+       seq_printf(m, "\t%s: %llu\n", "Total", total);
+
+       seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
+       i915_guc_client_info(m, dev_priv, &client);
+
+       /* Add more as required ... */
+
+       return 0;
+}
+
+static int i915_guc_log_dump(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
+       u32 *log;
+       int i = 0, pg;
+
+       if (!log_obj)
+               return 0;
+
+       for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) {
+               log = kmap_atomic(i915_gem_object_get_page(log_obj, pg));
+
+               for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
+                       seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+                                  *(log + i), *(log + i + 1),
+                                  *(log + i + 2), *(log + i + 3));
+
+               kunmap_atomic(log);
+       }
+
+       seq_putc(m, '\n');
+
+       return 0;
+}
+
 static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
@@ -2680,11 +2820,13 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
        struct drm_device *dev = node->minor->dev;
        struct drm_crtc *crtc = &intel_crtc->base;
        struct intel_encoder *intel_encoder;
+       struct drm_plane_state *plane_state = crtc->primary->state;
+       struct drm_framebuffer *fb = plane_state->fb;
 
-       if (crtc->primary->fb)
+       if (fb)
                seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
-                          crtc->primary->fb->base.id, crtc->x, crtc->y,
-                          crtc->primary->fb->width, crtc->primary->fb->height);
+                          fb->base.id, plane_state->src_x >> 16,
+                          plane_state->src_y >> 16, fb->width, fb->height);
        else
                seq_puts(m, "\tprimary plane disabled\n");
        for_each_encoder_on_crtc(dev, crtc, intel_encoder)
@@ -2706,8 +2848,7 @@ static void intel_dp_info(struct seq_file *m,
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
        seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
-       seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
-                  "no");
+       seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
        if (intel_encoder->type == INTEL_OUTPUT_EDP)
                intel_panel_info(m, &intel_connector->panel);
 }
@@ -2718,8 +2859,7 @@ static void intel_hdmi_info(struct seq_file *m,
        struct intel_encoder *intel_encoder = intel_connector->encoder;
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
 
-       seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
-                  "no");
+       seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
 }
 
 static void intel_lvds_info(struct seq_file *m,
@@ -4807,7 +4947,7 @@ static void cherryview_sseu_device_status(struct drm_device *dev,
                                          struct sseu_dev_status *stat)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       const int ss_max = 2;
+       int ss_max = 2;
        int ss;
        u32 sig1[ss_max], sig2[ss_max];
 
@@ -5033,6 +5173,9 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
        {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
        {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
+       {"i915_guc_info", i915_guc_info, 0},
+       {"i915_guc_load_status", i915_guc_load_status_info, 0},
+       {"i915_guc_log_dump", i915_guc_log_dump, 0},
        {"i915_frequency_info", i915_frequency_info, 0},
        {"i915_hangcheck_info", i915_hangcheck_info, 0},
        {"i915_drpc_info", i915_drpc_info, 0},
index ab37d1121be8277728bff5d25a0cb4a4599de0aa..0eda746850ef62df96fb8c12f3fa4e6457b75ad6 100644 (file)
@@ -75,7 +75,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
                value = 1;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
-               value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
+               value = dev_priv->num_fence_regs;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
@@ -183,35 +183,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
        return 0;
 }
 
-static int i915_setparam(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       drm_i915_setparam_t *param = data;
-
-       switch (param->param) {
-       case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
-       case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
-       case I915_SETPARAM_ALLOW_BATCHBUFFER:
-               /* Reject all old ums/dri params. */
-               return -ENODEV;
-
-       case I915_SETPARAM_NUM_USED_FENCES:
-               if (param->value > dev_priv->num_fence_regs ||
-                   param->value < 0)
-                       return -EINVAL;
-               /* Userspace can use first N regs */
-               dev_priv->fence_reg_start = param->value;
-               break;
-       default:
-               DRM_DEBUG_DRIVER("unknown parameter %d\n",
-                                       param->param);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
 static int i915_get_bridge_dev(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -364,12 +335,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                /* i915 resume handler doesn't set to D0 */
                pci_set_power_state(dev->pdev, PCI_D0);
-               i915_resume_legacy(dev);
+               i915_resume_switcheroo(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_err("switched off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-               i915_suspend_legacy(dev, pmm);
+               i915_suspend_switcheroo(dev, pmm);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
 }
@@ -435,6 +406,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
         * working irqs for e.g. gmbus and dp aux transfers. */
        intel_modeset_init(dev);
 
+       /* intel_guc_ucode_init() needs the mutex to allocate GEM objects */
+       mutex_lock(&dev->struct_mutex);
+       intel_guc_ucode_init(dev);
+       mutex_unlock(&dev->struct_mutex);
+
        ret = i915_gem_init(dev);
        if (ret)
                goto cleanup_irq;
@@ -476,6 +452,9 @@ cleanup_gem:
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
 cleanup_irq:
+       mutex_lock(&dev->struct_mutex);
+       intel_guc_ucode_fini(dev);
+       mutex_unlock(&dev->struct_mutex);
        drm_irq_uninstall(dev);
 cleanup_gem_stolen:
        i915_gem_cleanup_stolen(dev);
@@ -623,17 +602,6 @@ static void gen9_sseu_info_init(struct drm_device *dev)
        u32 fuse2, s_enable, ss_disable, eu_disable;
        u8 eu_mask = 0xff;
 
-       /*
-        * BXT has a single slice. BXT also has at most 6 EU per subslice,
-        * and therefore only the lowest 6 bits of the 8-bit EU disable
-        * fields are valid.
-       */
-       if (IS_BROXTON(dev)) {
-               s_max = 1;
-               eu_max = 6;
-               eu_mask = 0x3f;
-       }
-
        info = (struct intel_device_info *)&dev_priv->info;
        fuse2 = I915_READ(GEN8_FUSE2);
        s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
@@ -791,6 +759,24 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
                         info->has_eu_pg ? "y" : "n");
 }
 
+static void intel_init_dpio(struct drm_i915_private *dev_priv)
+{
+       if (!IS_VALLEYVIEW(dev_priv))
+               return;
+
+       /*
+        * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
+        * CHV x1 PHY (DP/HDMI D)
+        * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
+        */
+       if (IS_CHERRYVIEW(dev_priv)) {
+               DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
+               DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
+       } else {
+               DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
+       }
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -971,8 +957,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        intel_setup_gmbus(dev);
        intel_opregion_setup(dev);
 
-       intel_setup_bios(dev);
-
        i915_gem_load(dev);
 
        /* On the 945G/GM, the chipset reports the MSI capability on the
@@ -991,6 +975,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        intel_device_info_runtime_init(dev);
 
+       intel_init_dpio(dev_priv);
+
        if (INTEL_INFO(dev)->num_pipes) {
                ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
                if (ret)
@@ -1111,6 +1097,10 @@ int i915_driver_unload(struct drm_device *dev)
                dev_priv->vbt.child_dev = NULL;
                dev_priv->vbt.child_dev_num = 0;
        }
+       kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
+       dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+       kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
+       dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
 
        vga_switcheroo_unregister_client(dev->pdev);
        vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -1128,6 +1118,7 @@ int i915_driver_unload(struct drm_device *dev)
        flush_workqueue(dev_priv->wq);
 
        mutex_lock(&dev->struct_mutex);
+       intel_guc_ucode_fini(dev);
        i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
@@ -1226,7 +1217,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
index ab64d68388f232b543bf19c1726b44c7cd329eaa..e6d7a69ec1bfa01b2d0e008684bd72ac67579f29 100644 (file)
@@ -362,6 +362,7 @@ static const struct intel_device_info intel_skylake_info = {
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_llc = 1,
        .has_ddi = 1,
+       .has_fpga_dbg = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
@@ -374,6 +375,7 @@ static const struct intel_device_info intel_skylake_gt3_info = {
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
        .has_llc = 1,
        .has_ddi = 1,
+       .has_fpga_dbg = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
@@ -386,6 +388,7 @@ static const struct intel_device_info intel_broxton_info = {
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .num_pipes = 3,
        .has_ddi = 1,
+       .has_fpga_dbg = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
@@ -679,7 +682,7 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
        return 0;
 }
 
-int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
+int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
 {
        int error;
 
@@ -812,7 +815,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
        return ret;
 }
 
-int i915_resume_legacy(struct drm_device *dev)
+int i915_resume_switcheroo(struct drm_device *dev)
 {
        int ret;
 
@@ -1117,7 +1120,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
        s->gfx_pend_tlb1        = I915_READ(GEN7_GFX_PEND_TLB1);
 
        for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
-               s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
+               s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
 
        s->media_max_req_count  = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
        s->gfx_max_req_count    = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
@@ -1161,7 +1164,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
        s->pm_ier               = I915_READ(GEN6_PMIER);
 
        for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
-               s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);
+               s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
 
        /* GT SA CZ domain, 0x100000-0x138124 */
        s->tilectl              = I915_READ(TILECTL);
@@ -1199,7 +1202,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN7_GFX_PEND_TLB1,  s->gfx_pend_tlb1);
 
        for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
-               I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
+               I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
 
        I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
        I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
@@ -1243,7 +1246,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN6_PMIER,          s->pm_ier);
 
        for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
-               I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);
+               I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
 
        /* GT SA CZ domain, 0x100000-0x138124 */
        I915_WRITE(TILECTL,                     s->tilectl);
@@ -1552,6 +1555,15 @@ static int intel_runtime_resume(struct device *device)
        gen6_update_ring_freq(dev);
 
        intel_runtime_pm_enable_interrupts(dev_priv);
+
+       /*
+        * On VLV/CHV display interrupts are part of the display
+        * power well, so hpd is reinitialized from there. For
+        * everyone else do it here.
+        */
+       if (!IS_VALLEYVIEW(dev_priv))
+               intel_hpd_init(dev_priv);
+
        intel_enable_gt_powersave(dev);
 
        if (ret)
@@ -1649,7 +1661,7 @@ static struct drm_driver driver = {
         */
        .driver_features =
            DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
-           DRIVER_RENDER,
+           DRIVER_RENDER | DRIVER_MODESET,
        .load = i915_driver_load,
        .unload = i915_driver_unload,
        .open = i915_driver_open,
@@ -1658,10 +1670,6 @@ static struct drm_driver driver = {
        .postclose = i915_driver_postclose,
        .set_busid = drm_pci_set_busid,
 
-       /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
-       .suspend = i915_suspend_legacy,
-       .resume = i915_resume_legacy,
-
 #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = i915_debugfs_init,
        .debugfs_cleanup = i915_debugfs_cleanup,
@@ -1704,7 +1712,6 @@ static int __init i915_init(void)
         * either the i915.modeset prarameter or by the
         * vga_text_mode_force boot option.
         */
-       driver.driver_features |= DRIVER_MODESET;
 
        if (i915.modeset == 0)
                driver.driver_features &= ~DRIVER_MODESET;
@@ -1715,18 +1722,12 @@ static int __init i915_init(void)
 #endif
 
        if (!(driver.driver_features & DRIVER_MODESET)) {
-               driver.get_vblank_timestamp = NULL;
                /* Silently fail loading to not upset userspace. */
                DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
                return 0;
        }
 
-       /*
-        * FIXME: Note that we're lying to the DRM core here so that we can get access
-        * to the atomic ioctl and the atomic properties.  Only plane operations on
-        * a single CRTC will actually work.
-        */
-       if (driver.driver_features & DRIVER_MODESET)
+       if (i915.nuclear_pageflip)
                driver.driver_features |= DRIVER_ATOMIC;
 
        return drm_pci_init(&driver, &i915_pci_driver);
index e1db8de52851b979c31607abf7b6995bd1febe0b..0841ca569ccb78d213d0e3739ffae0eada31d2e7 100644 (file)
 #include <linux/intel-iommu.h>
 #include <linux/kref.h>
 #include <linux/pm_qos.h>
+#include "intel_guc.h"
 
 /* General customization:
  */
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20150731"
+#define DRIVER_DATE            "20150928"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
                BUILD_BUG_ON(__i915_warn_cond); \
        WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
 #else
-#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
+#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x )
 #endif
 
 #undef WARN_ON_ONCE
-#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")")
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x )
 
 #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
                             (long) (x), __func__);
        unlikely(__ret_warn_on);                                        \
 })
 
+static inline const char *yesno(bool v)
+{
+       return v ? "yes" : "no";
+}
+
 enum pipe {
        INVALID_PIPE = -1,
        PIPE_A = 0,
@@ -549,7 +555,7 @@ struct drm_i915_error_state {
 
                struct drm_i915_error_object {
                        int page_count;
-                       u32 gtt_offset;
+                       u64 gtt_offset;
                        u32 *pages[0];
                } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
 
@@ -575,7 +581,7 @@ struct drm_i915_error_state {
                u32 size;
                u32 name;
                u32 rseqno[I915_NUM_RINGS], wseqno;
-               u32 gtt_offset;
+               u64 gtt_offset;
                u32 read_domains;
                u32 write_domain;
                s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
@@ -665,6 +671,8 @@ struct drm_i915_display_funcs {
                              uint32_t level);
        void (*disable_backlight)(struct intel_connector *connector);
        void (*enable_backlight)(struct intel_connector *connector);
+       uint32_t (*backlight_hz_to_pwm)(struct intel_connector *connector,
+                                       uint32_t hz);
 };
 
 enum forcewake_domain_id {
@@ -882,7 +890,6 @@ struct intel_context {
        } legacy_hw_ctx;
 
        /* Execlists */
-       bool rcs_initialized;
        struct {
                struct drm_i915_gem_object *state;
                struct intel_ringbuffer *ringbuf;
@@ -941,6 +948,9 @@ struct i915_fbc {
                FBC_CHIP_DEFAULT, /* disabled by default on this chip */
                FBC_ROTATION, /* rotation is not supported */
                FBC_IN_DBG_MASTER, /* kernel debugger is active */
+               FBC_BAD_STRIDE, /* stride is not supported */
+               FBC_PIXEL_RATE, /* pixel rate is too big */
+               FBC_PIXEL_FORMAT /* pixel format is invalid */
        } no_fbc_reason;
 
        bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
@@ -1693,7 +1703,7 @@ struct i915_execbuffer_params {
        struct drm_file                 *file;
        uint32_t                        dispatch_flags;
        uint32_t                        args_batch_start_offset;
-       uint32_t                        batch_obj_vm_offset;
+       uint64_t                        batch_obj_vm_offset;
        struct intel_engine_cs          *ring;
        struct drm_i915_gem_object      *batch_obj;
        struct intel_context            *ctx;
@@ -1716,6 +1726,8 @@ struct drm_i915_private {
 
        struct i915_virtual_gpu vgpu;
 
+       struct intel_guc guc;
+
        struct intel_csr csr;
 
        /* Display CSR-related protection */
@@ -1790,12 +1802,12 @@ struct drm_i915_private {
        struct mutex pps_mutex;
 
        struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
-       int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
        int num_fence_regs; /* 8 on pre-965, 16 otherwise */
 
        unsigned int fsb_freq, mem_freq, is_ddr3;
        unsigned int skl_boot_cdclk;
        unsigned int cdclk_freq, max_cdclk_freq;
+       unsigned int max_dotclk_freq;
        unsigned int hpll_freq;
 
        /**
@@ -1963,6 +1975,11 @@ static inline struct drm_i915_private *dev_to_i915(struct device *dev)
        return to_i915(dev_get_drvdata(dev));
 }
 
+static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
+{
+       return container_of(guc, struct drm_i915_private, guc);
+}
+
 /* Iterate over initialised rings */
 #define for_each_ring(ring__, dev_priv__, i__) \
        for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
@@ -1999,25 +2016,26 @@ struct drm_i915_gem_object_ops {
 
 /*
  * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
- * considered to be the frontbuffer for the given plane interface-vise. This
+ * considered to be the frontbuffer for the given plane interface-wise. This
  * doesn't mean that the hw necessarily already scans it out, but that any
  * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
  *
  * We have one bit per pipe and per scanout plane type.
  */
-#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
+#define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
+#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
 #define INTEL_FRONTBUFFER_BITS \
        (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \
        (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
 #define INTEL_FRONTBUFFER_CURSOR(pipe) \
-       (1 << (1 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
-#define INTEL_FRONTBUFFER_SPRITE(pipe) \
-       (1 << (2 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+       (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \
+       (1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \
-       (1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+       (1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
-       (0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+       (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
 
 struct drm_i915_gem_object {
        struct drm_gem_object base;
@@ -2475,6 +2493,11 @@ struct drm_i915_cmd_table {
 #define IS_SKL_ULX(dev)                (INTEL_DEVID(dev) == 0x190E || \
                                 INTEL_DEVID(dev) == 0x1915 || \
                                 INTEL_DEVID(dev) == 0x191E)
+#define IS_SKL_GT3(dev)                (IS_SKYLAKE(dev) && \
+                                (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
+#define IS_SKL_GT4(dev)                (IS_SKYLAKE(dev) && \
+                                (INTEL_DEVID(dev) & 0x00F0) == 0x0030)
+
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
 #define SKL_REVID_A0           (0x0)
@@ -2486,7 +2509,7 @@ struct drm_i915_cmd_table {
 
 #define BXT_REVID_A0           (0x0)
 #define BXT_REVID_B0           (0x3)
-#define BXT_REVID_C0           (0x6)
+#define BXT_REVID_C0           (0x9)
 
 /*
  * The genX designation typically refers to the render engine, so render
@@ -2520,7 +2543,8 @@ struct drm_i915_cmd_table {
 #define HAS_HW_CONTEXTS(dev)   (INTEL_INFO(dev)->gen >= 6)
 #define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
 #define USES_PPGTT(dev)                (i915.enable_ppgtt)
-#define USES_FULL_PPGTT(dev)   (i915.enable_ppgtt == 2)
+#define USES_FULL_PPGTT(dev)   (i915.enable_ppgtt >= 2)
+#define USES_FULL_48BIT_PPGTT(dev)     (i915.enable_ppgtt == 3)
 
 #define HAS_OVERLAY(dev)               (INTEL_INFO(dev)->has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev)    (INTEL_INFO(dev)->overlay_needs_physical)
@@ -2564,7 +2588,10 @@ struct drm_i915_cmd_table {
 #define HAS_RC6(dev)           (INTEL_INFO(dev)->gen >= 6)
 #define HAS_RC6p(dev)          (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
 
-#define HAS_CSR(dev)   (IS_SKYLAKE(dev))
+#define HAS_CSR(dev)   (IS_GEN9(dev))
+
+#define HAS_GUC_UCODE(dev)     (IS_GEN9(dev))
+#define HAS_GUC_SCHED(dev)     (IS_GEN9(dev))
 
 #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
                                    INTEL_INFO(dev)->gen >= 8)
@@ -2584,6 +2611,7 @@ struct drm_i915_cmd_table {
 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
+#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
@@ -2603,8 +2631,8 @@ struct drm_i915_cmd_table {
 extern const struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 
-extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
-extern int i915_resume_legacy(struct drm_device *dev);
+extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
+extern int i915_resume_switcheroo(struct drm_device *dev);
 
 /* i915_params.c */
 struct i915_params {
@@ -2626,7 +2654,6 @@ struct i915_params {
        int enable_cmd_parser;
        /* leave bools at the end to not create holes */
        bool enable_hangcheck;
-       bool fastboot;
        bool prefault_disable;
        bool load_detect_test;
        bool reset;
@@ -2637,6 +2664,7 @@ struct i915_params {
        int use_mmio_flip;
        int mmio_debug;
        bool verbose_state_checks;
+       bool nuclear_pageflip;
        int edp_vswing;
 };
 extern struct i915_params i915 __read_mostly;
@@ -2716,6 +2744,9 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 
 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
+void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
+                                  uint32_t mask,
+                                  uint32_t bits);
 void
 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
 void
@@ -2783,8 +2814,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size);
 struct drm_i915_gem_object *i915_gem_object_create_from_data(
                struct drm_device *dev, const void *data, size_t size);
-void i915_init_vm(struct drm_i915_private *dev_priv,
-                 struct i915_address_space *vm);
 void i915_gem_free_object(struct drm_gem_object *obj);
 void i915_gem_vma_destroy(struct i915_vma *vma);
 
@@ -2986,13 +3015,11 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                struct drm_gem_object *gem_obj, int flags);
 
-unsigned long
-i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
-                             const struct i915_ggtt_view *view);
-unsigned long
-i915_gem_obj_offset(struct drm_i915_gem_object *o,
-                   struct i915_address_space *vm);
-static inline unsigned long
+u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+                                 const struct i915_ggtt_view *view);
+u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
+                       struct i915_address_space *vm);
+static inline u64
 i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
 {
        return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
@@ -3153,6 +3180,10 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
                                struct drm_mm_node *node, u64 size,
                                unsigned alignment);
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+                                        struct drm_mm_node *node, u64 size,
+                                        unsigned alignment, u64 start,
+                                        u64 end);
 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
                                 struct drm_mm_node *node);
 int i915_gem_init_stolen(struct drm_device *dev);
index 4d631a94648194957512d096e89da0e5242bd822..bf5ef7a07878e1caff5e79792c0273f74f219d30 100644 (file)
@@ -1005,12 +1005,14 @@ out:
                if (!needs_clflush_after &&
                    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                        if (i915_gem_clflush_object(obj, obj->pin_display))
-                               i915_gem_chipset_flush(dev);
+                               needs_clflush_after = true;
                }
        }
 
        if (needs_clflush_after)
                i915_gem_chipset_flush(dev);
+       else
+               obj->cache_dirty = true;
 
        intel_fb_obj_flush(obj, false, ORIGIN_CPU);
        return ret;
@@ -1711,8 +1713,8 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 
 /**
  * i915_gem_fault - fault a page into the GTT
- * vma: VMA in question
- * vmf: fault info
+ * @vma: VMA in question
+ * @vmf: fault info
  *
  * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
  * from userspace.  The fault handler takes care of binding the object to
@@ -3228,10 +3230,6 @@ int i915_vma_unbind(struct i915_vma *vma)
        ret = i915_gem_object_wait_rendering(obj, false);
        if (ret)
                return ret;
-       /* Continue on if we fail due to EIO, the GPU is hung so we
-        * should be safe and we need to cleanup or else we might
-        * cause memory corruption through use-after-free.
-        */
 
        if (i915_is_ggtt(vma->vm) &&
            vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
@@ -3355,7 +3353,8 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 size, fence_size, fence_alignment, unfenced_alignment;
+       u32 fence_alignment, unfenced_alignment;
+       u64 size, fence_size;
        u64 start =
                flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
        u64 end =
@@ -3414,7 +3413,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
         * attempt to find space.
         */
        if (size > end) {
-               DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%llu\n",
+               DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
                          ggtt_view ? ggtt_view->type : 0,
                          size,
                          flags & PIN_MAPPABLE ? "mappable" : "total",
@@ -3638,10 +3637,10 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        struct i915_vma *vma, *next;
-       int ret;
+       int ret = 0;
 
        if (obj->cache_level == cache_level)
-               return 0;
+               goto out;
 
        if (i915_gem_obj_is_pinned(obj)) {
                DRM_DEBUG("can not change the cache level of pinned objects\n");
@@ -3686,6 +3685,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                vma->node.color = cache_level;
        obj->cache_level = cache_level;
 
+out:
        if (obj->cache_dirty &&
            obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
            cpu_write_needs_clflush(obj)) {
@@ -3738,6 +3738,15 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
                level = I915_CACHE_NONE;
                break;
        case I915_CACHING_CACHED:
+               /*
+                * Due to a HW issue on BXT A stepping, GPU stores via a
+                * snooped mapping may leave stale data in a corresponding CPU
+                * cacheline, whereas normally such cachelines would get
+                * invalidated.
+                */
+               if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)
+                       return -ENODEV;
+
                level = I915_CACHE_LLC;
                break;
        case I915_CACHING_DISPLAY:
@@ -4011,15 +4020,13 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
                        return -EBUSY;
 
                if (i915_vma_misplaced(vma, alignment, flags)) {
-                       unsigned long offset;
-                       offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
-                                            i915_gem_obj_offset(obj, vm);
                        WARN(vma->pin_count,
                             "bo is already pinned in %s with incorrect alignment:"
-                            " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
+                            " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
                             " obj->map_and_fenceable=%d\n",
                             ggtt_view ? "ggtt" : "ppgtt",
-                            offset,
+                            upper_32_bits(vma->node.start),
+                            lower_32_bits(vma->node.start),
                             alignment,
                             !!(flags & PIN_MAPPABLE),
                             obj->map_and_fenceable);
@@ -4602,14 +4609,8 @@ int i915_gem_init_rings(struct drm_device *dev)
                        goto cleanup_vebox_ring;
        }
 
-       ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
-       if (ret)
-               goto cleanup_bsd2_ring;
-
        return 0;
 
-cleanup_bsd2_ring:
-       intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
 cleanup_vebox_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
 cleanup_blt_ring:
@@ -4679,6 +4680,33 @@ i915_gem_init_hw(struct drm_device *dev)
                        goto out;
        }
 
+       /* We can't enable contexts until all firmware is loaded */
+       if (HAS_GUC_UCODE(dev)) {
+               ret = intel_guc_ucode_load(dev);
+               if (ret) {
+                       /*
+                        * If we got an error and GuC submission is enabled, map
+                        * the error to -EIO so the GPU will be declared wedged.
+                        * OTOH, if we didn't intend to use the GuC anyway, just
+                        * discard the error and carry on.
+                        */
+                       DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret,
+                                 i915.enable_guc_submission ? "" :
+                                 " (ignored)");
+                       ret = i915.enable_guc_submission ? -EIO : 0;
+                       if (ret)
+                               goto out;
+               }
+       }
+
+       /*
+        * Increment the next seqno by 0x100 so we have a visible break
+        * on re-initialisation
+        */
+       ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
+       if (ret)
+               goto out;
+
        /* Now it is safe to go back round and do everything else: */
        for_each_ring(ring, dev_priv, i) {
                struct drm_i915_gem_request *req;
@@ -4816,18 +4844,6 @@ init_ring_lists(struct intel_engine_cs *ring)
        INIT_LIST_HEAD(&ring->request_list);
 }
 
-void i915_init_vm(struct drm_i915_private *dev_priv,
-                 struct i915_address_space *vm)
-{
-       if (!i915_is_ggtt(vm))
-               drm_mm_init(&vm->mm, vm->start, vm->total);
-       vm->dev = dev_priv->dev;
-       INIT_LIST_HEAD(&vm->active_list);
-       INIT_LIST_HEAD(&vm->inactive_list);
-       INIT_LIST_HEAD(&vm->global_link);
-       list_add_tail(&vm->global_link, &dev_priv->vm_list);
-}
-
 void
 i915_gem_load(struct drm_device *dev)
 {
@@ -4851,8 +4867,6 @@ i915_gem_load(struct drm_device *dev)
                                  NULL);
 
        INIT_LIST_HEAD(&dev_priv->vm_list);
-       i915_init_vm(dev_priv, &dev_priv->gtt.base);
-
        INIT_LIST_HEAD(&dev_priv->context_list);
        INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->mm.bound_list);
@@ -4880,6 +4894,14 @@ i915_gem_load(struct drm_device *dev)
                dev_priv->num_fence_regs =
                                I915_READ(vgtif_reg(avail_rs.fence_num));
 
+       /*
+        * Set initial sequence number for requests.
+        * Using this number allows the wraparound to happen early,
+        * catching any obvious problems.
+        */
+       dev_priv->next_seqno = ((u32)~0 - 0x1100);
+       dev_priv->last_seqno = ((u32)~0 - 0x1101);
+
        /* Initialize fence registers to zero */
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        i915_gem_restore_fences(dev);
@@ -4949,9 +4971,9 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
 
 /**
  * i915_gem_track_fb - update frontbuffer tracking
- * old: current GEM buffer for the frontbuffer slots
- * new: new GEM buffer for the frontbuffer slots
- * frontbuffer_bits: bitmask of frontbuffer slots
+ * @old: current GEM buffer for the frontbuffer slots
+ * @new: new GEM buffer for the frontbuffer slots
+ * @frontbuffer_bits: bitmask of frontbuffer slots
  *
  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
  * from @old and setting them in @new. Both @old and @new can be NULL.
@@ -4974,9 +4996,8 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 }
 
 /* All the new VM stuff */
-unsigned long
-i915_gem_obj_offset(struct drm_i915_gem_object *o,
-                   struct i915_address_space *vm)
+u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
+                       struct i915_address_space *vm)
 {
        struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;
@@ -4996,9 +5017,8 @@ i915_gem_obj_offset(struct drm_i915_gem_object *o,
        return -1;
 }
 
-unsigned long
-i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
-                             const struct i915_ggtt_view *view)
+u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+                                 const struct i915_ggtt_view *view)
 {
        struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
        struct i915_vma *vma;
index 8e893b354bccdfb5205dea4b4e3559d02225fe54..74aa0c9929ba3fe3fc2cc22230b4f16421ecf37e 100644 (file)
@@ -332,6 +332,13 @@ int i915_gem_context_init(struct drm_device *dev)
        if (WARN_ON(dev_priv->ring[RCS].default_context))
                return 0;
 
+       if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
+               if (!i915.enable_execlists) {
+                       DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
+                       return -EINVAL;
+               }
+       }
+
        if (i915.enable_execlists) {
                /* NB: intentionally left blank. We will allocate our own
                 * backing objects as we need them, thank you very much */
index a953d4975b8c08d237bac9aa4a7ecd3eaf29c28e..67ef118ee1605766362c06cad1cc1612ff1fd3e9 100644 (file)
@@ -1009,7 +1009,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
        }
 
        if (i915.enable_execlists && !ctx->engine[ring->id].state) {
-               int ret = intel_lr_context_deferred_create(ctx, ring);
+               int ret = intel_lr_context_deferred_alloc(ctx, ring);
                if (ret) {
                        DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
                        return ERR_PTR(ret);
index af1f8c461060ffb06b82969db0ceb82f7b490986..ab80f7370ab7f2f62a0da9fa06c7ea536a598364 100644 (file)
@@ -128,7 +128,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
                WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
                     (size & -size) != size ||
                     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-                    "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+                    "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
                     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
 
                if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
@@ -171,7 +171,7 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
                WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
                     (size & -size) != size ||
                     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
-                    "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+                    "object 0x%08llx not 512K or pot-size 0x%08x aligned\n",
                     i915_gem_obj_ggtt_offset(obj), size);
 
                pitch_val = obj->stride / 128;
@@ -322,7 +322,7 @@ i915_find_fence_reg(struct drm_device *dev)
 
        /* First try to find a free reg */
        avail = NULL;
-       for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+       for (i = 0; i < dev_priv->num_fence_regs; i++) {
                reg = &dev_priv->fence_regs[i];
                if (!reg->obj)
                        return reg;
index 96054a560f4f8da59f40a1fc532013a1b7c5b374..47344d068f9a2cfc266287f77864fe3c37dae14d 100644 (file)
@@ -204,6 +204,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
        return pde;
 }
 
+#define gen8_pdpe_encode gen8_pde_encode
+#define gen8_pml4e_encode gen8_pde_encode
+
 static gen6_pte_t snb_pte_encode(dma_addr_t addr,
                                 enum i915_cache_level level,
                                 bool valid, u32 unused)
@@ -522,6 +525,127 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
        fill_px(vm->dev, pd, scratch_pde);
 }
 
+static int __pdp_init(struct drm_device *dev,
+                     struct i915_page_directory_pointer *pdp)
+{
+       size_t pdpes = I915_PDPES_PER_PDP(dev);
+
+       pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
+                                 sizeof(unsigned long),
+                                 GFP_KERNEL);
+       if (!pdp->used_pdpes)
+               return -ENOMEM;
+
+       pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
+                                     GFP_KERNEL);
+       if (!pdp->page_directory) {
+               kfree(pdp->used_pdpes);
+               /* the PDP might be the statically allocated top level. Keep it
+                * as clean as possible */
+               pdp->used_pdpes = NULL;
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void __pdp_fini(struct i915_page_directory_pointer *pdp)
+{
+       kfree(pdp->used_pdpes);
+       kfree(pdp->page_directory);
+       pdp->page_directory = NULL;
+}
+
+static struct
+i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
+{
+       struct i915_page_directory_pointer *pdp;
+       int ret = -ENOMEM;
+
+       WARN_ON(!USES_FULL_48BIT_PPGTT(dev));
+
+       pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
+       if (!pdp)
+               return ERR_PTR(-ENOMEM);
+
+       ret = __pdp_init(dev, pdp);
+       if (ret)
+               goto fail_bitmap;
+
+       ret = setup_px(dev, pdp);
+       if (ret)
+               goto fail_page_m;
+
+       return pdp;
+
+fail_page_m:
+       __pdp_fini(pdp);
+fail_bitmap:
+       kfree(pdp);
+
+       return ERR_PTR(ret);
+}
+
+static void free_pdp(struct drm_device *dev,
+                    struct i915_page_directory_pointer *pdp)
+{
+       __pdp_fini(pdp);
+       if (USES_FULL_48BIT_PPGTT(dev)) {
+               cleanup_px(dev, pdp);
+               kfree(pdp);
+       }
+}
+
+static void gen8_initialize_pdp(struct i915_address_space *vm,
+                               struct i915_page_directory_pointer *pdp)
+{
+       gen8_ppgtt_pdpe_t scratch_pdpe;
+
+       scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
+
+       fill_px(vm->dev, pdp, scratch_pdpe);
+}
+
+static void gen8_initialize_pml4(struct i915_address_space *vm,
+                                struct i915_pml4 *pml4)
+{
+       gen8_ppgtt_pml4e_t scratch_pml4e;
+
+       scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
+                                         I915_CACHE_LLC);
+
+       fill_px(vm->dev, pml4, scratch_pml4e);
+}
+
+static void
+gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
+                         struct i915_page_directory_pointer *pdp,
+                         struct i915_page_directory *pd,
+                         int index)
+{
+       gen8_ppgtt_pdpe_t *page_directorypo;
+
+       if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+               return;
+
+       page_directorypo = kmap_px(pdp);
+       page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
+       kunmap_px(ppgtt, page_directorypo);
+}
+
+static void
+gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
+                                 struct i915_pml4 *pml4,
+                                 struct i915_page_directory_pointer *pdp,
+                                 int index)
+{
+       gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
+
+       WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
+       pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
+       kunmap_px(ppgtt, pagemap);
+}
+
 /* Broadwell Page Directory Pointer Descriptors */
 static int gen8_write_pdp(struct drm_i915_gem_request *req,
                          unsigned entry,
@@ -547,8 +671,8 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
        return 0;
 }
 
-static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct drm_i915_gem_request *req)
+static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
+                                struct drm_i915_gem_request *req)
 {
        int i, ret;
 
@@ -563,31 +687,38 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
        return 0;
 }
 
-static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
-                                  uint64_t start,
-                                  uint64_t length,
-                                  bool use_scratch)
+static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
+                             struct drm_i915_gem_request *req)
+{
+       return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
+}
+
+static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
+                                      struct i915_page_directory_pointer *pdp,
+                                      uint64_t start,
+                                      uint64_t length,
+                                      gen8_pte_t scratch_pte)
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
-       gen8_pte_t *pt_vaddr, scratch_pte;
-       unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
-       unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
-       unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
+       gen8_pte_t *pt_vaddr;
+       unsigned pdpe = gen8_pdpe_index(start);
+       unsigned pde = gen8_pde_index(start);
+       unsigned pte = gen8_pte_index(start);
        unsigned num_entries = length >> PAGE_SHIFT;
        unsigned last_pte, i;
 
-       scratch_pte = gen8_pte_encode(px_dma(ppgtt->base.scratch_page),
-                                     I915_CACHE_LLC, use_scratch);
+       if (WARN_ON(!pdp))
+               return;
 
        while (num_entries) {
                struct i915_page_directory *pd;
                struct i915_page_table *pt;
 
-               if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
+               if (WARN_ON(!pdp->page_directory[pdpe]))
                        break;
 
-               pd = ppgtt->pdp.page_directory[pdpe];
+               pd = pdp->page_directory[pdpe];
 
                if (WARN_ON(!pd->page_table[pde]))
                        break;
@@ -612,45 +743,69 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 
                pte = 0;
                if (++pde == I915_PDES) {
-                       pdpe++;
+                       if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
+                               break;
                        pde = 0;
                }
        }
 }
 
-static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
-                                     struct sg_table *pages,
-                                     uint64_t start,
-                                     enum i915_cache_level cache_level, u32 unused)
+static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
+                                  uint64_t start,
+                                  uint64_t length,
+                                  bool use_scratch)
+{
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(vm, struct i915_hw_ppgtt, base);
+       gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+                                                I915_CACHE_LLC, use_scratch);
+
+       if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+               gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
+                                          scratch_pte);
+       } else {
+               uint64_t templ4, pml4e;
+               struct i915_page_directory_pointer *pdp;
+
+               gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
+                       gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
+                                                  scratch_pte);
+               }
+       }
+}
+
+static void
+gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
+                             struct i915_page_directory_pointer *pdp,
+                             struct sg_page_iter *sg_iter,
+                             uint64_t start,
+                             enum i915_cache_level cache_level)
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
        gen8_pte_t *pt_vaddr;
-       unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
-       unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
-       unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
-       struct sg_page_iter sg_iter;
+       unsigned pdpe = gen8_pdpe_index(start);
+       unsigned pde = gen8_pde_index(start);
+       unsigned pte = gen8_pte_index(start);
 
        pt_vaddr = NULL;
 
-       for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
-               if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES))
-                       break;
-
+       while (__sg_page_iter_next(sg_iter)) {
                if (pt_vaddr == NULL) {
-                       struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe];
+                       struct i915_page_directory *pd = pdp->page_directory[pdpe];
                        struct i915_page_table *pt = pd->page_table[pde];
                        pt_vaddr = kmap_px(pt);
                }
 
                pt_vaddr[pte] =
-                       gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
+                       gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
                                        cache_level, true);
                if (++pte == GEN8_PTES) {
                        kunmap_px(ppgtt, pt_vaddr);
                        pt_vaddr = NULL;
                        if (++pde == I915_PDES) {
-                               pdpe++;
+                               if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
+                                       break;
                                pde = 0;
                        }
                        pte = 0;
@@ -661,6 +816,33 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
                kunmap_px(ppgtt, pt_vaddr);
 }
 
+static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
+                                     struct sg_table *pages,
+                                     uint64_t start,
+                                     enum i915_cache_level cache_level,
+                                     u32 unused)
+{
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(vm, struct i915_hw_ppgtt, base);
+       struct sg_page_iter sg_iter;
+
+       __sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
+
+       if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+               gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
+                                             cache_level);
+       } else {
+               struct i915_page_directory_pointer *pdp;
+               uint64_t templ4, pml4e;
+               uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;
+
+               gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
+                       gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
+                                                     start, cache_level);
+               }
+       }
+}
+
 static void gen8_free_page_tables(struct drm_device *dev,
                                  struct i915_page_directory *pd)
 {
@@ -699,8 +881,55 @@ static int gen8_init_scratch(struct i915_address_space *vm)
                return PTR_ERR(vm->scratch_pd);
        }
 
+       if (USES_FULL_48BIT_PPGTT(dev)) {
+               vm->scratch_pdp = alloc_pdp(dev);
+               if (IS_ERR(vm->scratch_pdp)) {
+                       free_pd(dev, vm->scratch_pd);
+                       free_pt(dev, vm->scratch_pt);
+                       free_scratch_page(dev, vm->scratch_page);
+                       return PTR_ERR(vm->scratch_pdp);
+               }
+       }
+
        gen8_initialize_pt(vm, vm->scratch_pt);
        gen8_initialize_pd(vm, vm->scratch_pd);
+       if (USES_FULL_48BIT_PPGTT(dev))
+               gen8_initialize_pdp(vm, vm->scratch_pdp);
+
+       return 0;
+}
+
+static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
+{
+       enum vgt_g2v_type msg;
+       struct drm_device *dev = ppgtt->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned int offset = vgtif_reg(pdp0_lo);
+       int i;
+
+       if (USES_FULL_48BIT_PPGTT(dev)) {
+               u64 daddr = px_dma(&ppgtt->pml4);
+
+               I915_WRITE(offset, lower_32_bits(daddr));
+               I915_WRITE(offset + 4, upper_32_bits(daddr));
+
+               msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
+                               VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
+       } else {
+               for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
+                       u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+                       I915_WRITE(offset, lower_32_bits(daddr));
+                       I915_WRITE(offset + 4, upper_32_bits(daddr));
+
+                       offset += 8;
+               }
+
+               msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
+                               VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
+       }
+
+       I915_WRITE(vgtif_reg(g2v_notify), msg);
 
        return 0;
 }
@@ -709,35 +938,65 @@ static void gen8_free_scratch(struct i915_address_space *vm)
 {
        struct drm_device *dev = vm->dev;
 
+       if (USES_FULL_48BIT_PPGTT(dev))
+               free_pdp(dev, vm->scratch_pdp);
        free_pd(dev, vm->scratch_pd);
        free_pt(dev, vm->scratch_pt);
        free_scratch_page(dev, vm->scratch_page);
 }
 
-static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
+static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
+                                   struct i915_page_directory_pointer *pdp)
+{
+       int i;
+
+       for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
+               if (WARN_ON(!pdp->page_directory[i]))
+                       continue;
+
+               gen8_free_page_tables(dev, pdp->page_directory[i]);
+               free_pd(dev, pdp->page_directory[i]);
+       }
+
+       free_pdp(dev, pdp);
+}
+
+static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
 {
-       struct i915_hw_ppgtt *ppgtt =
-               container_of(vm, struct i915_hw_ppgtt, base);
        int i;
 
-       for_each_set_bit(i, ppgtt->pdp.used_pdpes, GEN8_LEGACY_PDPES) {
-               if (WARN_ON(!ppgtt->pdp.page_directory[i]))
+       for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
+               if (WARN_ON(!ppgtt->pml4.pdps[i]))
                        continue;
 
-               gen8_free_page_tables(ppgtt->base.dev,
-                                     ppgtt->pdp.page_directory[i]);
-               free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]);
+               gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
        }
 
+       cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
+}
+
+static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
+{
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(vm, struct i915_hw_ppgtt, base);
+
+       if (intel_vgpu_active(vm->dev))
+               gen8_ppgtt_notify_vgt(ppgtt, false);
+
+       if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+               gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
+       else
+               gen8_ppgtt_cleanup_4lvl(ppgtt);
+
        gen8_free_scratch(vm);
 }
 
 /**
  * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
- * @ppgtt:     Master ppgtt structure.
- * @pd:                Page directory for this address range.
+ * @vm:        Master vm structure.
+ * @pd:        Page directory for this address range.
  * @start:     Starting virtual address to begin allocations.
- * @length     Size of the allocations.
+ * @length:    Size of the allocations.
  * @new_pts:   Bitmap set by function with new allocations. Likely used by the
  *             caller to free on error.
  *
@@ -750,22 +1009,22 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
  *
  * Return: 0 if success; negative error code otherwise.
  */
-static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
+static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
                                     struct i915_page_directory *pd,
                                     uint64_t start,
                                     uint64_t length,
                                     unsigned long *new_pts)
 {
-       struct drm_device *dev = ppgtt->base.dev;
+       struct drm_device *dev = vm->dev;
        struct i915_page_table *pt;
        uint64_t temp;
        uint32_t pde;
 
        gen8_for_each_pde(pt, pd, start, length, temp, pde) {
                /* Don't reallocate page tables */
-               if (pt) {
+               if (test_bit(pde, pd->used_pdes)) {
                        /* Scratch is never allocated this way */
-                       WARN_ON(pt == ppgtt->base.scratch_pt);
+                       WARN_ON(pt == vm->scratch_pt);
                        continue;
                }
 
@@ -773,9 +1032,10 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
                if (IS_ERR(pt))
                        goto unwind_out;
 
-               gen8_initialize_pt(&ppgtt->base, pt);
+               gen8_initialize_pt(vm, pt);
                pd->page_table[pde] = pt;
                __set_bit(pde, new_pts);
+               trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
        }
 
        return 0;
@@ -789,11 +1049,11 @@ unwind_out:
 
 /**
  * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
- * @ppgtt:     Master ppgtt structure.
+ * @vm:        Master vm structure.
  * @pdp:       Page directory pointer for this address range.
  * @start:     Starting virtual address to begin allocations.
- * @length     Size of the allocations.
- * @new_pds    Bitmap set by function with new allocations. Likely used by the
+ * @length:    Size of the allocations.
+ * @new_pds:   Bitmap set by function with new allocations. Likely used by the
  *             caller to free on error.
  *
  * Allocate the required number of page directories starting at the pde index of
@@ -810,48 +1070,102 @@ unwind_out:
  *
  * Return: 0 if success; negative error code otherwise.
  */
-static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
-                                    struct i915_page_directory_pointer *pdp,
-                                    uint64_t start,
-                                    uint64_t length,
-                                    unsigned long *new_pds)
+static int
+gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
+                                 struct i915_page_directory_pointer *pdp,
+                                 uint64_t start,
+                                 uint64_t length,
+                                 unsigned long *new_pds)
 {
-       struct drm_device *dev = ppgtt->base.dev;
+       struct drm_device *dev = vm->dev;
        struct i915_page_directory *pd;
        uint64_t temp;
        uint32_t pdpe;
+       uint32_t pdpes = I915_PDPES_PER_PDP(dev);
 
-       WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES));
+       WARN_ON(!bitmap_empty(new_pds, pdpes));
 
        gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
-               if (pd)
+               if (test_bit(pdpe, pdp->used_pdpes))
                        continue;
 
                pd = alloc_pd(dev);
                if (IS_ERR(pd))
                        goto unwind_out;
 
-               gen8_initialize_pd(&ppgtt->base, pd);
+               gen8_initialize_pd(vm, pd);
                pdp->page_directory[pdpe] = pd;
                __set_bit(pdpe, new_pds);
+               trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
        }
 
        return 0;
 
 unwind_out:
-       for_each_set_bit(pdpe, new_pds, GEN8_LEGACY_PDPES)
+       for_each_set_bit(pdpe, new_pds, pdpes)
                free_pd(dev, pdp->page_directory[pdpe]);
 
        return -ENOMEM;
 }
 
-static void
-free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long **new_pts)
+/**
+ * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
+ * @vm:        Master vm structure.
+ * @pml4:      Page map level 4 for this address range.
+ * @start:     Starting virtual address to begin allocations.
+ * @length:    Size of the allocations.
+ * @new_pdps:  Bitmap set by function with new allocations. Likely used by the
+ *             caller to free on error.
+ *
+ * Allocate the required number of page directory pointers. Extremely similar to
+ * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
+ * The main difference is here we are limited by the pml4 boundary (instead of
+ * the page directory pointer).
+ *
+ * Return: 0 if success; negative error code otherwise.
+ */
+static int
+gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
+                                 struct i915_pml4 *pml4,
+                                 uint64_t start,
+                                 uint64_t length,
+                                 unsigned long *new_pdps)
 {
-       int i;
+       struct drm_device *dev = vm->dev;
+       struct i915_page_directory_pointer *pdp;
+       uint64_t temp;
+       uint32_t pml4e;
+
+       WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));
+
+       gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
+               if (!test_bit(pml4e, pml4->used_pml4es)) {
+                       pdp = alloc_pdp(dev);
+                       if (IS_ERR(pdp))
+                               goto unwind_out;
+
+                       gen8_initialize_pdp(vm, pdp);
+                       pml4->pdps[pml4e] = pdp;
+                       __set_bit(pml4e, new_pdps);
+                       trace_i915_page_directory_pointer_entry_alloc(vm,
+                                                                     pml4e,
+                                                                     start,
+                                                                     GEN8_PML4E_SHIFT);
+               }
+       }
+
+       return 0;
+
+unwind_out:
+       for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
+               free_pdp(dev, pml4->pdps[pml4e]);
+
+       return -ENOMEM;
+}
 
-       for (i = 0; i < GEN8_LEGACY_PDPES; i++)
-               kfree(new_pts[i]);
+static void
+free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
+{
        kfree(new_pts);
        kfree(new_pds);
 }
@@ -861,28 +1175,20 @@ free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long **new_pts)
  */
 static
 int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
-                                        unsigned long ***new_pts)
+                                        unsigned long **new_pts,
+                                        uint32_t pdpes)
 {
-       int i;
        unsigned long *pds;
-       unsigned long **pts;
+       unsigned long *pts;
 
-       pds = kcalloc(BITS_TO_LONGS(GEN8_LEGACY_PDPES), sizeof(unsigned long), GFP_KERNEL);
+       pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
        if (!pds)
                return -ENOMEM;
 
-       pts = kcalloc(GEN8_LEGACY_PDPES, sizeof(unsigned long *), GFP_KERNEL);
-       if (!pts) {
-               kfree(pds);
-               return -ENOMEM;
-       }
-
-       for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
-               pts[i] = kcalloc(BITS_TO_LONGS(I915_PDES),
-                                sizeof(unsigned long), GFP_KERNEL);
-               if (!pts[i])
-                       goto err_out;
-       }
+       pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
+                     GFP_TEMPORARY);
+       if (!pts)
+               goto err_out;
 
        *new_pds = pds;
        *new_pts = pts;
@@ -904,18 +1210,21 @@ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
        ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
 }
 
-static int gen8_alloc_va_range(struct i915_address_space *vm,
-                              uint64_t start,
-                              uint64_t length)
+static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
+                                   struct i915_page_directory_pointer *pdp,
+                                   uint64_t start,
+                                   uint64_t length)
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
-       unsigned long *new_page_dirs, **new_page_tables;
+       unsigned long *new_page_dirs, *new_page_tables;
+       struct drm_device *dev = vm->dev;
        struct i915_page_directory *pd;
        const uint64_t orig_start = start;
        const uint64_t orig_length = length;
        uint64_t temp;
        uint32_t pdpe;
+       uint32_t pdpes = I915_PDPES_PER_PDP(dev);
        int ret;
 
        /* Wrap is never okay since we can only represent 48b, and we don't
@@ -924,25 +1233,25 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
        if (WARN_ON(start + length < start))
                return -ENODEV;
 
-       if (WARN_ON(start + length > ppgtt->base.total))
+       if (WARN_ON(start + length > vm->total))
                return -ENODEV;
 
-       ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables);
+       ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
        if (ret)
                return ret;
 
        /* Do the allocations first so we can easily bail out */
-       ret = gen8_ppgtt_alloc_page_directories(ppgtt, &ppgtt->pdp, start, length,
-                                       new_page_dirs);
+       ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
+                                               new_page_dirs);
        if (ret) {
                free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
                return ret;
        }
 
        /* For every page directory referenced, allocate page tables */
-       gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
-               ret = gen8_ppgtt_alloc_pagetabs(ppgtt, pd, start, length,
-                                               new_page_tables[pdpe]);
+       gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
+               ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
+                                               new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
                if (ret)
                        goto err_out;
        }
@@ -952,10 +1261,10 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 
        /* Allocations have completed successfully, so set the bitmaps, and do
         * the mappings. */
-       gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+       gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
                gen8_pde_t *const page_directory = kmap_px(pd);
                struct i915_page_table *pt;
-               uint64_t pd_len = gen8_clamp_pd(start, length);
+               uint64_t pd_len = length;
                uint64_t pd_start = start;
                uint32_t pde;
 
@@ -979,14 +1288,18 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
                        /* Map the PDE to the page table */
                        page_directory[pde] = gen8_pde_encode(px_dma(pt),
                                                              I915_CACHE_LLC);
+                       trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
+                                                       gen8_pte_index(start),
+                                                       gen8_pte_count(start, length),
+                                                       GEN8_PTES);
 
                        /* NB: We haven't yet mapped ptes to pages. At this
                         * point we're still relying on insert_entries() */
                }
 
                kunmap_px(ppgtt, page_directory);
-
-               __set_bit(pdpe, ppgtt->pdp.used_pdpes);
+               __set_bit(pdpe, pdp->used_pdpes);
+               gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
        }
 
        free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
@@ -995,18 +1308,191 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
 
 err_out:
        while (pdpe--) {
-               for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES)
-                       free_pt(vm->dev, ppgtt->pdp.page_directory[pdpe]->page_table[temp]);
+               for_each_set_bit(temp, new_page_tables + pdpe *
+                               BITS_TO_LONGS(I915_PDES), I915_PDES)
+                       free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
        }
 
-       for_each_set_bit(pdpe, new_page_dirs, GEN8_LEGACY_PDPES)
-               free_pd(vm->dev, ppgtt->pdp.page_directory[pdpe]);
+       for_each_set_bit(pdpe, new_page_dirs, pdpes)
+               free_pd(dev, pdp->page_directory[pdpe]);
 
        free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
        mark_tlbs_dirty(ppgtt);
        return ret;
 }
 
+static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
+                                   struct i915_pml4 *pml4,
+                                   uint64_t start,
+                                   uint64_t length)
+{
+       DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
+       struct i915_hw_ppgtt *ppgtt =
+                       container_of(vm, struct i915_hw_ppgtt, base);
+       struct i915_page_directory_pointer *pdp;
+       uint64_t temp, pml4e;
+       int ret = 0;
+
+       /* Do the pml4 allocations first, so we don't need to track the newly
+        * allocated tables below the pdp */
+       bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);
+
+       /* The pagedirectory and pagetable allocations are done in the shared 3
+        * and 4 level code. Just allocate the pdps.
+        */
+       ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
+                                               new_pdps);
+       if (ret)
+               return ret;
+
+       WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
+            "The allocation has spanned more than 512GB. "
+            "It is highly likely this is incorrect.");
+
+       gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
+               WARN_ON(!pdp);
+
+               ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
+               if (ret)
+                       goto err_out;
+
+               gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
+       }
+
+       bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
+                 GEN8_PML4ES_PER_PML4);
+
+       return 0;
+
+err_out:
+       for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
+               gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);
+
+       return ret;
+}
+
+static int gen8_alloc_va_range(struct i915_address_space *vm,
+                              uint64_t start, uint64_t length)
+{
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(vm, struct i915_hw_ppgtt, base);
+
+       if (USES_FULL_48BIT_PPGTT(vm->dev))
+               return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
+       else
+               return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
+}
+
+static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
+                         uint64_t start, uint64_t length,
+                         gen8_pte_t scratch_pte,
+                         struct seq_file *m)
+{
+       struct i915_page_directory *pd;
+       uint64_t temp;
+       uint32_t pdpe;
+
+       gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
+               struct i915_page_table *pt;
+               uint64_t pd_len = length;
+               uint64_t pd_start = start;
+               uint32_t pde;
+
+               if (!test_bit(pdpe, pdp->used_pdpes))
+                       continue;
+
+               seq_printf(m, "\tPDPE #%d\n", pdpe);
+               gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
+                       uint32_t  pte;
+                       gen8_pte_t *pt_vaddr;
+
+                       if (!test_bit(pde, pd->used_pdes))
+                               continue;
+
+                       pt_vaddr = kmap_px(pt);
+                       for (pte = 0; pte < GEN8_PTES; pte += 4) {
+                               uint64_t va =
+                                       (pdpe << GEN8_PDPE_SHIFT) |
+                                       (pde << GEN8_PDE_SHIFT) |
+                                       (pte << GEN8_PTE_SHIFT);
+                               int i;
+                               bool found = false;
+
+                               for (i = 0; i < 4; i++)
+                                       if (pt_vaddr[pte + i] != scratch_pte)
+                                               found = true;
+                               if (!found)
+                                       continue;
+
+                               seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
+                               for (i = 0; i < 4; i++) {
+                                       if (pt_vaddr[pte + i] != scratch_pte)
+                                               seq_printf(m, " %llx", pt_vaddr[pte + i]);
+                                       else
+                                               seq_puts(m, "  SCRATCH ");
+                               }
+                               seq_puts(m, "\n");
+                       }
+                       /* don't use kunmap_px, it could trigger
+                        * an unnecessary flush.
+                        */
+                       kunmap_atomic(pt_vaddr);
+               }
+       }
+}
+
+static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
+{
+       struct i915_address_space *vm = &ppgtt->base;
+       uint64_t start = ppgtt->base.start;
+       uint64_t length = ppgtt->base.total;
+       gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+                                                I915_CACHE_LLC, true);
+
+       if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+               gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
+       } else {
+               uint64_t templ4, pml4e;
+               struct i915_pml4 *pml4 = &ppgtt->pml4;
+               struct i915_page_directory_pointer *pdp;
+
+               gen8_for_each_pml4e(pdp, pml4, start, length, templ4, pml4e) {
+                       if (!test_bit(pml4e, pml4->used_pml4es))
+                               continue;
+
+                       seq_printf(m, "    PML4E #%llu\n", pml4e);
+                       gen8_dump_pdp(pdp, start, length, scratch_pte, m);
+               }
+       }
+}
+
+static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
+{
+       unsigned long *new_page_dirs, *new_page_tables;
+       uint32_t pdpes = I915_PDPES_PER_PDP(dev);
+       int ret;
+
+       /* We allocate temp bitmap for page tables for no gain
+        * but as this is for init only, lets keep the things simple
+        */
+       ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
+       if (ret)
+               return ret;
+
+       /* Allocate for all pdps regardless of how the ppgtt
+        * was defined.
+        */
+       ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
+                                               0, 1ULL << 32,
+                                               new_page_dirs);
+       if (!ret)
+               *ppgtt->pdp.used_pdpes = *new_page_dirs;
+
+       free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+
+       return ret;
+}
+
 /*
  * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
  * with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -1023,24 +1509,49 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
                return ret;
 
        ppgtt->base.start = 0;
-       ppgtt->base.total = 1ULL << 32;
-       if (IS_ENABLED(CONFIG_X86_32))
-               /* While we have a proliferation of size_t variables
-                * we cannot represent the full ppgtt size on 32bit,
-                * so limit it to the same size as the GGTT (currently
-                * 2GiB).
-                */
-               ppgtt->base.total = to_i915(ppgtt->base.dev)->gtt.base.total;
        ppgtt->base.cleanup = gen8_ppgtt_cleanup;
        ppgtt->base.allocate_va_range = gen8_alloc_va_range;
        ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
        ppgtt->base.clear_range = gen8_ppgtt_clear_range;
        ppgtt->base.unbind_vma = ppgtt_unbind_vma;
        ppgtt->base.bind_vma = ppgtt_bind_vma;
+       ppgtt->debug_dump = gen8_dump_ppgtt;
+
+       if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+               ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
+               if (ret)
+                       goto free_scratch;
+
+               gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
+
+               ppgtt->base.total = 1ULL << 48;
+               ppgtt->switch_mm = gen8_48b_mm_switch;
+       } else {
+               ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
+               if (ret)
+                       goto free_scratch;
+
+               ppgtt->base.total = 1ULL << 32;
+               ppgtt->switch_mm = gen8_legacy_mm_switch;
+               trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
+                                                             0, 0,
+                                                             GEN8_PML4E_SHIFT);
+
+               if (intel_vgpu_active(ppgtt->base.dev)) {
+                       ret = gen8_preallocate_top_level_pdps(ppgtt);
+                       if (ret)
+                               goto free_scratch;
+               }
+       }
 
-       ppgtt->switch_mm = gen8_mm_switch;
+       if (intel_vgpu_active(ppgtt->base.dev))
+               gen8_ppgtt_notify_vgt(ppgtt, true);
 
        return 0;
+
+free_scratch:
+       gen8_free_scratch(&ppgtt->base);
+       return ret;
 }
 
 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
@@ -1228,8 +1739,9 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
        int j;
 
        for_each_ring(ring, dev_priv, j) {
+               u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
                I915_WRITE(RING_MODE_GEN7(ring),
-                          _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+                          _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
        }
 }
 
@@ -1609,6 +2121,16 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
                return gen8_ppgtt_init(ppgtt);
 }
 
+static void i915_address_space_init(struct i915_address_space *vm,
+                                   struct drm_i915_private *dev_priv)
+{
+       drm_mm_init(&vm->mm, vm->start, vm->total);
+       vm->dev = dev_priv->dev;
+       INIT_LIST_HEAD(&vm->active_list);
+       INIT_LIST_HEAD(&vm->inactive_list);
+       list_add_tail(&vm->global_link, &dev_priv->vm_list);
+}
+
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1617,9 +2139,7 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
        ret = __hw_ppgtt_init(dev, ppgtt);
        if (ret == 0) {
                kref_init(&ppgtt->ref);
-               drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
-                           ppgtt->base.total);
-               i915_init_vm(dev_priv, &ppgtt->base);
+               i915_address_space_init(&ppgtt->base, dev_priv);
        }
 
        return ret;
@@ -2013,7 +2533,6 @@ static int ggtt_bind_vma(struct i915_vma *vma,
                 * the bound flag ourselves.
                 */
                vma->bound |= GLOBAL_BIND;
-
        }
 
        if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
@@ -2084,9 +2603,9 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
 }
 
 static int i915_gem_setup_global_gtt(struct drm_device *dev,
-                                    unsigned long start,
-                                    unsigned long mappable_end,
-                                    unsigned long end)
+                                    u64 start,
+                                    u64 mappable_end,
+                                    u64 end)
 {
        /* Let GEM Manage all of the aperture.
         *
@@ -2106,11 +2625,13 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 
        BUG_ON(mappable_end > end);
 
-       /* Subtract the guard page ... */
-       drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
+       ggtt_vm->start = start;
 
-       dev_priv->gtt.base.start = start;
-       dev_priv->gtt.base.total = end - start;
+       /* Subtract the guard page before address space initialization to
+        * shrink the range used by drm_mm */
+       ggtt_vm->total = end - start - PAGE_SIZE;
+       i915_address_space_init(ggtt_vm, dev_priv);
+       ggtt_vm->total += PAGE_SIZE;
 
        if (intel_vgpu_active(dev)) {
                ret = intel_vgt_balloon(dev);
@@ -2119,13 +2640,13 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
        }
 
        if (!HAS_LLC(dev))
-               dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
+               ggtt_vm->mm.color_adjust = i915_gtt_color_adjust;
 
        /* Mark any preallocated objects as occupied */
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
 
-               DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
+               DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
                              i915_gem_obj_ggtt_offset(obj), obj->base.size);
 
                WARN_ON(i915_gem_obj_ggtt_bound(obj));
@@ -2135,6 +2656,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
                        return ret;
                }
                vma->bound |= GLOBAL_BIND;
+               list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
        }
 
        /* Clear any non-preallocated blocks */
@@ -2722,15 +3244,18 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
 
 }
 
-static void
-rotate_pages(dma_addr_t *in, unsigned int width, unsigned int height,
-            struct sg_table *st)
+static struct scatterlist *
+rotate_pages(dma_addr_t *in, unsigned int offset,
+            unsigned int width, unsigned int height,
+            struct sg_table *st, struct scatterlist *sg)
 {
        unsigned int column, row;
        unsigned int src_idx;
-       struct scatterlist *sg = st->sgl;
 
-       st->nents = 0;
+       if (!sg) {
+               st->nents = 0;
+               sg = st->sgl;
+       }
 
        for (column = 0; column < width; column++) {
                src_idx = width * (height - 1) + column;
@@ -2741,12 +3266,14 @@ rotate_pages(dma_addr_t *in, unsigned int width, unsigned int height,
                         * The only thing we need are DMA addresses.
                         */
                        sg_set_page(sg, NULL, PAGE_SIZE, 0);
-                       sg_dma_address(sg) = in[src_idx];
+                       sg_dma_address(sg) = in[offset + src_idx];
                        sg_dma_len(sg) = PAGE_SIZE;
                        sg = sg_next(sg);
                        src_idx -= width;
                }
        }
+
+       return sg;
 }
 
 static struct sg_table *
@@ -2755,10 +3282,13 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
 {
        struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
        unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
+       unsigned int size_pages_uv;
        struct sg_page_iter sg_iter;
        unsigned long i;
        dma_addr_t *page_addr_list;
        struct sg_table *st;
+       unsigned int uv_start_page;
+       struct scatterlist *sg;
        int ret = -ENOMEM;
 
        /* Allocate a temporary list of source pages for random access. */
@@ -2767,12 +3297,18 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
        if (!page_addr_list)
                return ERR_PTR(ret);
 
+       /* Account for UV plane with NV12. */
+       if (rot_info->pixel_format == DRM_FORMAT_NV12)
+               size_pages_uv = rot_info->size_uv >> PAGE_SHIFT;
+       else
+               size_pages_uv = 0;
+
        /* Allocate target SG list. */
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                goto err_st_alloc;
 
-       ret = sg_alloc_table(st, size_pages, GFP_KERNEL);
+       ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL);
        if (ret)
                goto err_sg_alloc;
 
@@ -2784,15 +3320,32 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
        }
 
        /* Rotate the pages. */
-       rotate_pages(page_addr_list,
+       sg = rotate_pages(page_addr_list, 0,
                     rot_info->width_pages, rot_info->height_pages,
-                    st);
+                    st, NULL);
+
+       /* Append the UV plane if NV12. */
+       if (rot_info->pixel_format == DRM_FORMAT_NV12) {
+               uv_start_page = size_pages;
+
+               /* Check for tile-row un-alignment. */
+               if (offset_in_page(rot_info->uv_offset))
+                       uv_start_page--;
+
+               rot_info->uv_start_page = uv_start_page;
+
+               rotate_pages(page_addr_list, uv_start_page,
+                            rot_info->width_pages_uv,
+                            rot_info->height_pages_uv,
+                            st, sg);
+       }
 
        DRM_DEBUG_KMS(
-                     "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages).\n",
+                     "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0)).\n",
                      obj->base.size, rot_info->pitch, rot_info->height,
                      rot_info->pixel_format, rot_info->width_pages,
-                     rot_info->height_pages, size_pages);
+                     rot_info->height_pages, size_pages + size_pages_uv,
+                     size_pages);
 
        drm_free_large(page_addr_list);
 
@@ -2804,10 +3357,11 @@ err_st_alloc:
        drm_free_large(page_addr_list);
 
        DRM_DEBUG_KMS(
-                     "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages)\n",
+                     "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0))\n",
                      obj->base.size, ret, rot_info->pitch, rot_info->height,
                      rot_info->pixel_format, rot_info->width_pages,
-                     rot_info->height_pages, size_pages);
+                     rot_info->height_pages, size_pages + size_pages_uv,
+                     size_pages);
        return ERR_PTR(ret);
 }
 
index e1cfa292f9adf8cd63dff0c1c162115aef209eb2..9fbb07d6eaadf9aa4ff2b0c25ed195c34d1beffc 100644 (file)
@@ -39,6 +39,8 @@ struct drm_i915_file_private;
 typedef uint32_t gen6_pte_t;
 typedef uint64_t gen8_pte_t;
 typedef uint64_t gen8_pde_t;
+typedef uint64_t gen8_ppgtt_pdpe_t;
+typedef uint64_t gen8_ppgtt_pml4e_t;
 
 #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
 
@@ -88,9 +90,18 @@ typedef uint64_t gen8_pde_t;
  * PDPE  |  PDE  |  PTE  | offset
  * The difference as compared to normal x86 3 level page table is the PDPEs are
  * programmed via register.
+ *
+ * GEN8 48b legacy style address is defined as a 4 level page table:
+ * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
+ * PML4E | PDPE  |  PDE  |  PTE  | offset
  */
+#define GEN8_PML4ES_PER_PML4           512
+#define GEN8_PML4E_SHIFT               39
+#define GEN8_PML4E_MASK                        (GEN8_PML4ES_PER_PML4 - 1)
 #define GEN8_PDPE_SHIFT                        30
-#define GEN8_PDPE_MASK                 0x3
+/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page
+ * tables */
+#define GEN8_PDPE_MASK                 0x1ff
 #define GEN8_PDE_SHIFT                 21
 #define GEN8_PDE_MASK                  0x1ff
 #define GEN8_PTE_SHIFT                 12
@@ -98,6 +109,9 @@ typedef uint64_t gen8_pde_t;
 #define GEN8_LEGACY_PDPES              4
 #define GEN8_PTES                      I915_PTES(sizeof(gen8_pte_t))
 
+#define I915_PDPES_PER_PDP(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
+                                GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
+
 #define PPAT_UNCACHED_INDEX            (_PAGE_PWT | _PAGE_PCD)
 #define PPAT_CACHED_PDE_INDEX          0 /* WB LLC */
 #define PPAT_CACHED_INDEX              _PAGE_PAT /* WB LLCeLLC */
@@ -124,10 +138,14 @@ enum i915_ggtt_view_type {
 struct intel_rotation_info {
        unsigned int height;
        unsigned int pitch;
+       unsigned int uv_offset;
        uint32_t pixel_format;
        uint64_t fb_modifier;
        unsigned int width_pages, height_pages;
        uint64_t size;
+       unsigned int width_pages_uv, height_pages_uv;
+       uint64_t size_uv;
+       unsigned int uv_start_page;
 };
 
 struct i915_ggtt_view {
@@ -135,7 +153,7 @@ struct i915_ggtt_view {
 
        union {
                struct {
-                       unsigned long offset;
+                       u64 offset;
                        unsigned int size;
                } partial;
        } params;
@@ -241,9 +259,17 @@ struct i915_page_directory {
 };
 
 struct i915_page_directory_pointer {
-       /* struct page *page; */
-       DECLARE_BITMAP(used_pdpes, GEN8_LEGACY_PDPES);
-       struct i915_page_directory *page_directory[GEN8_LEGACY_PDPES];
+       struct i915_page_dma base;
+
+       unsigned long *used_pdpes;
+       struct i915_page_directory **page_directory;
+};
+
+struct i915_pml4 {
+       struct i915_page_dma base;
+
+       DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
+       struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
 };
 
 struct i915_address_space {
@@ -256,6 +282,7 @@ struct i915_address_space {
        struct i915_page_scratch *scratch_page;
        struct i915_page_table *scratch_pt;
        struct i915_page_directory *scratch_pd;
+       struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */
 
        /**
         * List of objects currently involved in rendering.
@@ -318,6 +345,7 @@ struct i915_gtt {
        struct i915_address_space base;
 
        size_t stolen_size;             /* Total size of stolen memory */
+       size_t stolen_usable_size;      /* Total size minus BIOS reserved */
        u64 mappable_end;               /* End offset that we can CPU map */
        struct io_mapping *mappable;    /* Mapping to our CPU mappable region */
        phys_addr_t mappable_base;      /* PA of our GMADR */
@@ -341,8 +369,9 @@ struct i915_hw_ppgtt {
        struct drm_mm_node node;
        unsigned long pd_dirty_rings;
        union {
-               struct i915_page_directory_pointer pdp;
-               struct i915_page_directory pd;
+               struct i915_pml4 pml4;          /* GEN8+ & 48b PPGTT */
+               struct i915_page_directory_pointer pdp; /* GEN8+ */
+               struct i915_page_directory pd;          /* GEN6-7 */
        };
 
        struct drm_i915_file_private *file_priv;
@@ -436,24 +465,23 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
             temp = min(temp, length),                                  \
             start += temp, length -= temp)
 
-#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter)         \
-       for (iter = gen8_pdpe_index(start);     \
-            pd = (pdp)->page_directory[iter], length > 0 && iter < GEN8_LEGACY_PDPES;  \
+#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
+       for (iter = gen8_pdpe_index(start); \
+            pd = (pdp)->page_directory[iter], \
+            length > 0 && (iter < I915_PDPES_PER_PDP(dev)); \
             iter++,                            \
             temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start,       \
             temp = min(temp, length),                                  \
             start += temp, length -= temp)
 
-/* Clamp length to the next page_directory boundary */
-static inline uint64_t gen8_clamp_pd(uint64_t start, uint64_t length)
-{
-       uint64_t next_pd = ALIGN(start + 1, 1 << GEN8_PDPE_SHIFT);
-
-       if (next_pd > (start + length))
-               return length;
-
-       return next_pd - start;
-}
+#define gen8_for_each_pml4e(pdp, pml4, start, length, temp, iter)      \
+       for (iter = gen8_pml4e_index(start);    \
+            pdp = (pml4)->pdps[iter], \
+            length > 0 && iter < GEN8_PML4ES_PER_PML4; \
+            iter++,                            \
+            temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT) - start,   \
+            temp = min(temp, length),                                  \
+            start += temp, length -= temp)
 
 static inline uint32_t gen8_pte_index(uint64_t address)
 {
@@ -472,8 +500,7 @@ static inline uint32_t gen8_pdpe_index(uint64_t address)
 
 static inline uint32_t gen8_pml4e_index(uint64_t address)
 {
-       WARN_ON(1); /* For 64B */
-       return 0;
+       return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
 }
 
 static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
index f361c4a5699550dc581ad06bb79aa36a8aebb803..15207796e1b364487a0ce26d79d06b9a4fbf9dc5 100644 (file)
@@ -42,9 +42,9 @@
  * for is a boon.
  */
 
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
-                               struct drm_mm_node *node, u64 size,
-                               unsigned alignment)
+int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
+                                        struct drm_mm_node *node, u64 size,
+                                        unsigned alignment, u64 start, u64 end)
 {
        int ret;
 
@@ -52,13 +52,23 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
                return -ENODEV;
 
        mutex_lock(&dev_priv->mm.stolen_lock);
-       ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size, alignment,
-                                DRM_MM_SEARCH_DEFAULT);
+       ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
+                                         alignment, start, end,
+                                         DRM_MM_SEARCH_DEFAULT);
        mutex_unlock(&dev_priv->mm.stolen_lock);
 
        return ret;
 }
 
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+                               struct drm_mm_node *node, u64 size,
+                               unsigned alignment)
+{
+       return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
+                                       alignment, 0,
+                                       dev_priv->gtt.stolen_usable_size);
+}
+
 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
                                 struct drm_mm_node *node)
 {
@@ -186,6 +196,29 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
        drm_mm_takedown(&dev_priv->mm.stolen);
 }
 
+static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
+                                   unsigned long *base, unsigned long *size)
+{
+       uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
+                                    CTG_STOLEN_RESERVED :
+                                    ELK_STOLEN_RESERVED);
+       unsigned long stolen_top = dev_priv->mm.stolen_base +
+               dev_priv->gtt.stolen_size;
+
+       *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
+
+       WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
+
+       /* On these platforms, the register doesn't have a size field, so the
+        * size is the distance between the base and the top of the stolen
+        * memory. We also have the genuine case where base is zero and there's
+        * nothing reserved. */
+       if (*base == 0)
+               *size = 0;
+       else
+               *size = stolen_top - *base;
+}
+
 static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
                                     unsigned long *base, unsigned long *size)
 {
@@ -281,7 +314,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
 int i915_gem_init_stolen(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long reserved_total, reserved_base, reserved_size;
+       unsigned long reserved_total, reserved_base = 0, reserved_size;
        unsigned long stolen_top;
 
        mutex_init(&dev_priv->mm.stolen_lock);
@@ -305,7 +338,12 @@ int i915_gem_init_stolen(struct drm_device *dev)
        switch (INTEL_INFO(dev_priv)->gen) {
        case 2:
        case 3:
+               break;
        case 4:
+               if (IS_G4X(dev))
+                       g4x_get_stolen_reserved(dev_priv, &reserved_base,
+                                               &reserved_size);
+               break;
        case 5:
                /* Assume the gen6 maximum for the older platforms. */
                reserved_size = 1024 * 1024;
@@ -352,9 +390,11 @@ int i915_gem_init_stolen(struct drm_device *dev)
                      dev_priv->gtt.stolen_size >> 10,
                      (dev_priv->gtt.stolen_size - reserved_total) >> 10);
 
+       dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
+                                          reserved_total;
+
        /* Basic memrange allocator for stolen space */
-       drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
-                   reserved_total);
+       drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
 
        return 0;
 }
@@ -544,7 +584,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
        vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
-               goto err_out;
+               goto err;
        }
 
        /* To simplify the initialisation sequence between KMS and GTT,
@@ -558,23 +598,19 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
                if (ret) {
                        DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
-                       goto err_vma;
+                       goto err;
                }
-       }
 
-       vma->bound |= GLOBAL_BIND;
+               vma->bound |= GLOBAL_BIND;
+               list_add_tail(&vma->mm_list, &ggtt->inactive_list);
+       }
 
        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
-       list_add_tail(&vma->mm_list, &ggtt->inactive_list);
        i915_gem_object_pin_pages(obj);
 
        return obj;
 
-err_vma:
-       i915_gem_vma_destroy(vma);
-err_out:
-       i915_gem_stolen_remove_node(dev_priv, stolen);
-       kfree(stolen);
+err:
        drm_gem_object_unreference(&obj->base);
        return NULL;
 }
index 8fd431bcdfd3a33ffb6afda7a1584b44e33d8296..d11901d590ac85ab5f3b646ce6e5a916b8c8f1f4 100644 (file)
@@ -813,7 +813,6 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
 int
 i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_userptr *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
@@ -826,9 +825,6 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
        if (offset_in_page(args->user_ptr | args->user_size))
                return -EINVAL;
 
-       if (args->user_size > dev_priv->gtt.base.total)
-               return -E2BIG;
-
        if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
                       (char __user *)(unsigned long)args->user_ptr, args->user_size))
                return -EFAULT;
index 41d0739e6fdfa9474a21b63b754c9000a7b33421..f95de05f793d176030521c86c17081baf34c7021 100644 (file)
 #include <generated/utsrelease.h>
 #include "i915_drv.h"
 
-static const char *yesno(int v)
-{
-       return v ? "yes" : "no";
-}
-
 static const char *ring_str(int ring)
 {
        switch (ring) {
@@ -197,8 +192,9 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
        err_printf(m, "  %s [%d]:\n", name, count);
 
        while (count--) {
-               err_printf(m, "    %08x %8u %02x %02x [ ",
-                          err->gtt_offset,
+               err_printf(m, "    %08x_%08x %8u %02x %02x [ ",
+                          upper_32_bits(err->gtt_offset),
+                          lower_32_bits(err->gtt_offset),
                           err->size,
                           err->read_domains,
                           err->write_domain);
@@ -427,15 +423,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                                err_printf(m, " (submitted by %s [%d])",
                                           error->ring[i].comm,
                                           error->ring[i].pid);
-                       err_printf(m, " --- gtt_offset = 0x%08x\n",
-                                  obj->gtt_offset);
+                       err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
+                                  upper_32_bits(obj->gtt_offset),
+                                  lower_32_bits(obj->gtt_offset));
                        print_error_obj(m, obj);
                }
 
                obj = error->ring[i].wa_batchbuffer;
                if (obj) {
                        err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
-                                  dev_priv->ring[i].name, obj->gtt_offset);
+                                  dev_priv->ring[i].name,
+                                  lower_32_bits(obj->gtt_offset));
                        print_error_obj(m, obj);
                }
 
@@ -454,22 +452,28 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                if ((obj = error->ring[i].ringbuffer)) {
                        err_printf(m, "%s --- ringbuffer = 0x%08x\n",
                                   dev_priv->ring[i].name,
-                                  obj->gtt_offset);
+                                  lower_32_bits(obj->gtt_offset));
                        print_error_obj(m, obj);
                }
 
                if ((obj = error->ring[i].hws_page)) {
-                       err_printf(m, "%s --- HW Status = 0x%08x\n",
-                                  dev_priv->ring[i].name,
-                                  obj->gtt_offset);
+                       u64 hws_offset = obj->gtt_offset;
+                       u32 *hws_page = &obj->pages[0][0];
+
+                       if (i915.enable_execlists) {
+                               hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
+                               hws_page = &obj->pages[LRC_PPHWSP_PN][0];
+                       }
+                       err_printf(m, "%s --- HW Status = 0x%08llx\n",
+                                  dev_priv->ring[i].name, hws_offset);
                        offset = 0;
                        for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
                                err_printf(m, "[%04x] %08x %08x %08x %08x\n",
                                           offset,
-                                          obj->pages[0][elt],
-                                          obj->pages[0][elt+1],
-                                          obj->pages[0][elt+2],
-                                          obj->pages[0][elt+3]);
+                                          hws_page[elt],
+                                          hws_page[elt+1],
+                                          hws_page[elt+2],
+                                          hws_page[elt+3]);
                                        offset += 16;
                        }
                }
@@ -477,13 +481,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                if ((obj = error->ring[i].ctx)) {
                        err_printf(m, "%s --- HW Context = 0x%08x\n",
                                   dev_priv->ring[i].name,
-                                  obj->gtt_offset);
+                                  lower_32_bits(obj->gtt_offset));
                        print_error_obj(m, obj);
                }
        }
 
        if ((obj = error->semaphore_obj)) {
-               err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
+               err_printf(m, "Semaphore page = 0x%08x\n",
+                          lower_32_bits(obj->gtt_offset));
                for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
                        err_printf(m, "[%04x] %08x %08x %08x %08x\n",
                                   elt * 4,
@@ -591,7 +596,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
        int num_pages;
        bool use_ggtt;
        int i = 0;
-       u32 reloc_offset;
+       u64 reloc_offset;
 
        if (src == NULL || src->pages == NULL)
                return NULL;
index ccdc6c8ac20b0c64e33cab97869bd5d74edb4d72..9d79a6b7cc2fad0fd3023a1d0262469c985e1e5b 100644 (file)
 #define   GS_MIA_SHIFT                 16
 #define   GS_MIA_MASK                    (0x07 << GS_MIA_SHIFT)
 
-#define GUC_WOPCM_SIZE                 0xc050
-#define   GUC_WOPCM_SIZE_VALUE           (0x80 << 12)  /* 512KB */
-#define GUC_WOPCM_OFFSET               0x80000         /* 512KB */
-
 #define SOFT_SCRATCH(n)                        (0xc180 + ((n) * 4))
 
 #define UOS_RSA_SCRATCH_0              0xc200
 #define   UOS_MOVE                       (1<<4)
 #define   START_DMA                      (1<<0)
 #define DMA_GUC_WOPCM_OFFSET           0xc340
+#define   GUC_WOPCM_OFFSET_VALUE         0x80000       /* 512KB */
+#define GUC_MAX_IDLE_COUNT             0xC3E4
+
+#define GUC_WOPCM_SIZE                 0xc050
+#define   GUC_WOPCM_SIZE_VALUE           (0x80 << 12)  /* 512KB */
+
+/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
+#define        GUC_WOPCM_TOP                   (GUC_WOPCM_SIZE_VALUE)
 
 #define GEN8_GT_PM_CONFIG              0x138140
+#define GEN9LP_GT_PM_CONFIG            0x138140
 #define GEN9_GT_PM_CONFIG              0x13816c
-#define   GEN8_GT_DOORBELL_ENABLE        (1<<0)
+#define   GT_DOORBELL_ENABLE             (1<<0)
 
 #define GEN8_GTCR                      0x4274
 #define   GEN8_GTCR_INVALIDATE           (1<<0)
@@ -80,7 +85,8 @@
                                 GUC_ENABLE_READ_CACHE_LOGIC            | \
                                 GUC_ENABLE_MIA_CACHING                 | \
                                 GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA    | \
-                                GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA)
+                                GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA   | \
+                                GUC_ENABLE_MIA_CLOCK_GATING)
 
 #define HOST2GUC_INTERRUPT             0xc4c8
 #define   HOST2GUC_TRIGGER               (1<<0)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
new file mode 100644 (file)
index 0000000..792d0b9
--- /dev/null
@@ -0,0 +1,916 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include <linux/circ_buf.h>
+#include "i915_drv.h"
+#include "intel_guc.h"
+
+/**
+ * DOC: GuC Client
+ *
+ * i915_guc_client:
+ * We use the term client to avoid confusion with contexts. An i915_guc_client is
+ * equivalent to GuC object guc_context_desc. This context descriptor is
+ * allocated from a pool of 1024 entries. Kernel driver will allocate doorbell
+ * and workqueue for it. Also the process descriptor (guc_process_desc), which
+ * is mapped to client space. So the client can write Work Item then ring the
+ * doorbell.
+ *
+ * To simplify the implementation, we allocate one gem object that contains all
+ * pages for doorbell, process descriptor and workqueue.
+ *
+ * The Scratch registers:
+ * There are 16 MMIO-based registers start from 0xC180. The kernel driver writes
+ * a value to the action register (SOFT_SCRATCH_0) along with any data. It then
+ * triggers an interrupt on the GuC via another register write (0xC4C8).
+ * Firmware writes a success/fail code back to the action register after
+ * processing the request. The kernel driver polls waiting for this update and
+ * then proceeds.
+ * See host2guc_action()
+ *
+ * Doorbells:
+ * Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW)
+ * mapped into process space.
+ *
+ * Work Items:
+ * There are several types of work items that the host may place into a
+ * workqueue, each with its own requirements and limitations. Currently only
+ * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
+ * represents in-order queue. The kernel driver packs ring tail pointer and an
+ * ELSP context descriptor dword into Work Item.
+ * See guc_add_workqueue_item()
+ *
+ */
+
+/*
+ * Read GuC command/status register (SOFT_SCRATCH_0)
+ * Return true if it contains a response rather than a command
+ */
+static inline bool host2guc_action_response(struct drm_i915_private *dev_priv,
+                                           u32 *status)
+{
+       u32 val = I915_READ(SOFT_SCRATCH(0));
+       *status = val;
+       return GUC2HOST_IS_RESPONSE(val);
+}
+
+static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       u32 status;
+       int i;
+       int ret;
+
+       if (WARN_ON(len < 1 || len > 15))
+               return -EINVAL;
+
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+       spin_lock(&dev_priv->guc.host2guc_lock);
+
+       dev_priv->guc.action_count += 1;
+       dev_priv->guc.action_cmd = data[0];
+
+       for (i = 0; i < len; i++)
+               I915_WRITE(SOFT_SCRATCH(i), data[i]);
+
+       POSTING_READ(SOFT_SCRATCH(i - 1));
+
+       I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER);
+
+       /* No HOST2GUC command should take longer than 10ms */
+       ret = wait_for_atomic(host2guc_action_response(dev_priv, &status), 10);
+       if (status != GUC2HOST_STATUS_SUCCESS) {
+               /*
+                * Either the GuC explicitly returned an error (which
+                * we convert to -EIO here) or no response at all was
+                * received within the timeout limit (-ETIMEDOUT)
+                */
+               if (ret != -ETIMEDOUT)
+                       ret = -EIO;
+
+               DRM_ERROR("GUC: host2guc action 0x%X failed. ret=%d "
+                               "status=0x%08X response=0x%08X\n",
+                               data[0], ret, status,
+                               I915_READ(SOFT_SCRATCH(15)));
+
+               dev_priv->guc.action_fail += 1;
+               dev_priv->guc.action_err = ret;
+       }
+       dev_priv->guc.action_status = status;
+
+       spin_unlock(&dev_priv->guc.host2guc_lock);
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+       return ret;
+}
+
+/*
+ * Tell the GuC to allocate or deallocate a specific doorbell
+ */
+
+static int host2guc_allocate_doorbell(struct intel_guc *guc,
+                                     struct i915_guc_client *client)
+{
+       u32 data[2];
+
+       data[0] = HOST2GUC_ACTION_ALLOCATE_DOORBELL;
+       data[1] = client->ctx_index;
+
+       return host2guc_action(guc, data, 2);
+}
+
+static int host2guc_release_doorbell(struct intel_guc *guc,
+                                    struct i915_guc_client *client)
+{
+       u32 data[2];
+
+       data[0] = HOST2GUC_ACTION_DEALLOCATE_DOORBELL;
+       data[1] = client->ctx_index;
+
+       return host2guc_action(guc, data, 2);
+}
+
+static int host2guc_sample_forcewake(struct intel_guc *guc,
+                                    struct i915_guc_client *client)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       u32 data[2];
+
+       data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
+       data[1] = (intel_enable_rc6(dev_priv->dev)) ? 1 : 0;
+
+       return host2guc_action(guc, data, 2);
+}
+
+/*
+ * Initialise, update, or clear doorbell data shared with the GuC
+ *
+ * These functions modify shared data and so need access to the mapped
+ * client object which contains the page being used for the doorbell
+ */
+
+static void guc_init_doorbell(struct intel_guc *guc,
+                             struct i915_guc_client *client)
+{
+       struct guc_doorbell_info *doorbell;
+       void *base;
+
+       base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
+       doorbell = base + client->doorbell_offset;
+
+       doorbell->db_status = 1;
+       doorbell->cookie = 0;
+
+       kunmap_atomic(base);
+}
+
+static int guc_ring_doorbell(struct i915_guc_client *gc)
+{
+       struct guc_process_desc *desc;
+       union guc_doorbell_qw db_cmp, db_exc, db_ret;
+       union guc_doorbell_qw *db;
+       void *base;
+       int attempt = 2, ret = -EAGAIN;
+
+       base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
+       desc = base + gc->proc_desc_offset;
+
+       /* Update the tail so it is visible to GuC */
+       desc->tail = gc->wq_tail;
+
+       /* current cookie */
+       db_cmp.db_status = GUC_DOORBELL_ENABLED;
+       db_cmp.cookie = gc->cookie;
+
+       /* cookie to be updated */
+       db_exc.db_status = GUC_DOORBELL_ENABLED;
+       db_exc.cookie = gc->cookie + 1;
+       if (db_exc.cookie == 0)
+               db_exc.cookie = 1;
+
+       /* pointer of current doorbell cacheline */
+       db = base + gc->doorbell_offset;
+
+       while (attempt--) {
+               /* lets ring the doorbell */
+               db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
+                       db_cmp.value_qw, db_exc.value_qw);
+
+               /* if the exchange was successfully executed */
+               if (db_ret.value_qw == db_cmp.value_qw) {
+                       /* db was successfully rung */
+                       gc->cookie = db_exc.cookie;
+                       ret = 0;
+                       break;
+               }
+
+               /* XXX: doorbell was lost and need to acquire it again */
+               if (db_ret.db_status == GUC_DOORBELL_DISABLED)
+                       break;
+
+               DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
+                         db_cmp.cookie, db_ret.cookie);
+
+               /* update the cookie to newly read cookie from GuC */
+               db_cmp.cookie = db_ret.cookie;
+               db_exc.cookie = db_ret.cookie + 1;
+               if (db_exc.cookie == 0)
+                       db_exc.cookie = 1;
+       }
+
+       kunmap_atomic(base);
+       return ret;
+}
+
+static void guc_disable_doorbell(struct intel_guc *guc,
+                                struct i915_guc_client *client)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       struct guc_doorbell_info *doorbell;
+       void *base;
+       int drbreg = GEN8_DRBREGL(client->doorbell_id);
+       int value;
+
+       base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
+       doorbell = base + client->doorbell_offset;
+
+       doorbell->db_status = 0;
+
+       kunmap_atomic(base);
+
+       I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID);
+
+       value = I915_READ(drbreg);
+       WARN_ON((value & GEN8_DRB_VALID) != 0);
+
+       I915_WRITE(GEN8_DRBREGU(client->doorbell_id), 0);
+       I915_WRITE(drbreg, 0);
+
+       /* XXX: wait for any interrupts */
+       /* XXX: wait for workqueue to drain */
+}
+
+/*
+ * Select, assign and release doorbell cachelines
+ *
+ * These functions track which doorbell cachelines are in use.
+ * The data they manipulate is protected by the host2guc lock.
+ */
+
+static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
+{
+       const uint32_t cacheline_size = cache_line_size();
+       uint32_t offset;
+
+       spin_lock(&guc->host2guc_lock);
+
+       /* Doorbell uses a single cache line within a page */
+       offset = offset_in_page(guc->db_cacheline);
+
+       /* Moving to next cache line to reduce contention */
+       guc->db_cacheline += cacheline_size;
+
+       spin_unlock(&guc->host2guc_lock);
+
+       DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
+                       offset, guc->db_cacheline, cacheline_size);
+
+       return offset;
+}
+
+static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
+{
+       /*
+        * The bitmap is split into two halves; the first half is used for
+        * normal priority contexts, the second half for high-priority ones.
+        * Note that logically higher priorities are numerically less than
+        * normal ones, so the test below means "is it high-priority?"
+        */
+       const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
+       const uint16_t half = GUC_MAX_DOORBELLS / 2;
+       const uint16_t start = hi_pri ? half : 0;
+       const uint16_t end = start + half;
+       uint16_t id;
+
+       spin_lock(&guc->host2guc_lock);
+       id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
+       if (id == end)
+               id = GUC_INVALID_DOORBELL_ID;
+       else
+               bitmap_set(guc->doorbell_bitmap, id, 1);
+       spin_unlock(&guc->host2guc_lock);
+
+       DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
+                       hi_pri ? "high" : "normal", id);
+
+       return id;
+}
+
+static void release_doorbell(struct intel_guc *guc, uint16_t id)
+{
+       spin_lock(&guc->host2guc_lock);
+       bitmap_clear(guc->doorbell_bitmap, id, 1);
+       spin_unlock(&guc->host2guc_lock);
+}
+
+/*
+ * Initialise the process descriptor shared with the GuC firmware.
+ */
+static void guc_init_proc_desc(struct intel_guc *guc,
+                              struct i915_guc_client *client)
+{
+       struct guc_process_desc *desc;
+       void *base;
+
+       base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
+       desc = base + client->proc_desc_offset;
+
+       memset(desc, 0, sizeof(*desc));
+
+       /*
+        * XXX: pDoorbell and WQVBaseAddress are pointers in process address
+        * space for ring3 clients (set them as in mmap_ioctl) or kernel
+        * space for kernel clients (map on demand instead? May make debug
+        * easier to have it mapped).
+        */
+       desc->wq_base_addr = 0;
+       desc->db_base_addr = 0;
+
+       desc->context_id = client->ctx_index;
+       desc->wq_size_bytes = client->wq_size;
+       desc->wq_status = WQ_STATUS_ACTIVE;
+       desc->priority = client->priority;
+
+       kunmap_atomic(base);
+}
+
+/*
+ * Initialise/clear the context descriptor shared with the GuC firmware.
+ *
+ * This descriptor tells the GuC where (in GGTT space) to find the important
+ * data structures relating to this client (doorbell, process descriptor,
+ * write queue, etc).
+ */
+
+static void guc_init_ctx_desc(struct intel_guc *guc,
+                             struct i915_guc_client *client)
+{
+       struct intel_context *ctx = client->owner;
+       struct guc_context_desc desc;
+       struct sg_table *sg;
+       int i;
+
+       memset(&desc, 0, sizeof(desc));
+
+       desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL;
+       desc.context_id = client->ctx_index;
+       desc.priority = client->priority;
+       desc.db_id = client->doorbell_id;
+
+       for (i = 0; i < I915_NUM_RINGS; i++) {
+               struct guc_execlist_context *lrc = &desc.lrc[i];
+               struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
+               struct intel_engine_cs *ring;
+               struct drm_i915_gem_object *obj;
+               uint64_t ctx_desc;
+
+               /* TODO: We have a design issue to be solved here. Only when we
+                * receive the first batch, we know which engine is used by the
+                * user. But here GuC expects the lrc and ring to be pinned. It
+                * is not an issue for default context, which is the only one
+                * for now who owns a GuC client. But for future owner of GuC
+                * client, need to make sure lrc is pinned prior to enter here.
+                */
+               obj = ctx->engine[i].state;
+               if (!obj)
+                       break;  /* XXX: continue? */
+
+               ring = ringbuf->ring;
+               ctx_desc = intel_lr_context_descriptor(ctx, ring);
+               lrc->context_desc = (u32)ctx_desc;
+
+               /* The state page is after PPHWSP */
+               lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
+                               LRC_STATE_PN * PAGE_SIZE;
+               lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
+                               (ring->id << GUC_ELC_ENGINE_OFFSET);
+
+               obj = ringbuf->obj;
+
+               lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
+               lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
+               lrc->ring_next_free_location = lrc->ring_begin;
+               lrc->ring_current_tail_pointer_value = 0;
+
+               desc.engines_used |= (1 << ring->id);
+       }
+
+       WARN_ON(desc.engines_used == 0);
+
+       /*
+        * The CPU address is only needed at certain points, so kmap_atomic on
+        * demand instead of storing it in the ctx descriptor.
+        * XXX: May make debug easier to have it mapped
+        */
+       desc.db_trigger_cpu = 0;
+       desc.db_trigger_uk = client->doorbell_offset +
+               i915_gem_obj_ggtt_offset(client->client_obj);
+       desc.db_trigger_phy = client->doorbell_offset +
+               sg_dma_address(client->client_obj->pages->sgl);
+
+       desc.process_desc = client->proc_desc_offset +
+               i915_gem_obj_ggtt_offset(client->client_obj);
+
+       desc.wq_addr = client->wq_offset +
+               i915_gem_obj_ggtt_offset(client->client_obj);
+
+       desc.wq_size = client->wq_size;
+
+       /*
+        * XXX: Take LRCs from an existing intel_context if this is not an
+        * IsKMDCreatedContext client
+        */
+       desc.desc_private = (uintptr_t)client;
+
+       /* Pool context is pinned already */
+       sg = guc->ctx_pool_obj->pages;
+       sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
+                            sizeof(desc) * client->ctx_index);
+}
+
+static void guc_fini_ctx_desc(struct intel_guc *guc,
+                             struct i915_guc_client *client)
+{
+       struct guc_context_desc desc;
+       struct sg_table *sg;
+
+       memset(&desc, 0, sizeof(desc));
+
+       sg = guc->ctx_pool_obj->pages;
+       sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
+                            sizeof(desc) * client->ctx_index);
+}
+
+/* Get valid workqueue item and return it back to offset */
+static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
+{
+       struct guc_process_desc *desc;
+       void *base;
+       u32 size = sizeof(struct guc_wq_item);
+       int ret = 0, timeout_counter = 200;
+
+       base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
+       desc = base + gc->proc_desc_offset;
+
+       while (timeout_counter-- > 0) {
+               ret = wait_for_atomic(CIRC_SPACE(gc->wq_tail, desc->head,
+                               gc->wq_size) >= size, 1);
+
+               if (!ret) {
+                       *offset = gc->wq_tail;
+
+                       /* advance the tail for next workqueue item */
+                       gc->wq_tail += size;
+                       gc->wq_tail &= gc->wq_size - 1;
+
+                       /* this will break the loop */
+                       timeout_counter = 0;
+               }
+       };
+
+       kunmap_atomic(base);
+
+       return ret;
+}
+
+static int guc_add_workqueue_item(struct i915_guc_client *gc,
+                                 struct drm_i915_gem_request *rq)
+{
+       enum intel_ring_id ring_id = rq->ring->id;
+       struct guc_wq_item *wqi;
+       void *base;
+       u32 tail, wq_len, wq_off = 0;
+       int ret;
+
+       ret = guc_get_workqueue_space(gc, &wq_off);
+       if (ret)
+               return ret;
+
+       /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
+        * should not have the case where structure wqi is across page, neither
+        * wrapped to the beginning. This simplifies the implementation below.
+        *
+        * XXX: if not the case, we need save data to a temp wqi and copy it to
+        * workqueue buffer dw by dw.
+        */
+       WARN_ON(sizeof(struct guc_wq_item) != 16);
+       WARN_ON(wq_off & 3);
+
+       /* wq starts from the page after doorbell / process_desc */
+       base = kmap_atomic(i915_gem_object_get_page(gc->client_obj,
+                       (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT));
+       wq_off &= PAGE_SIZE - 1;
+       wqi = (struct guc_wq_item *)((char *)base + wq_off);
+
+       /* len does not include the header */
+       wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
+       wqi->header = WQ_TYPE_INORDER |
+                       (wq_len << WQ_LEN_SHIFT) |
+                       (ring_id << WQ_TARGET_SHIFT) |
+                       WQ_NO_WCFLUSH_WAIT;
+
+       /* The GuC wants only the low-order word of the context descriptor */
+       wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->ring);
+
+       /* The GuC firmware wants the tail index in QWords, not bytes */
+       tail = rq->ringbuf->tail >> 3;
+       wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
+       wqi->fence_id = 0; /*XXX: what fence to be here */
+
+       kunmap_atomic(base);
+
+       return 0;
+}
+
+#define CTX_RING_BUFFER_START          0x08
+
+/* Update the ringbuffer pointer in a saved context image */
+static void lr_context_update(struct drm_i915_gem_request *rq)
+{
+       enum intel_ring_id ring_id = rq->ring->id;
+       struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state;
+       struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
+       struct page *page;
+       uint32_t *reg_state;
+
+       BUG_ON(!ctx_obj);
+       WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
+       WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
+
+       page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+       reg_state = kmap_atomic(page);
+
+       reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
+
+       kunmap_atomic(reg_state);
+}
+
+/**
+ * i915_guc_submit() - Submit commands through GuC
+ * @client:    the guc client where commands will go through
+ * @ctx:       LRC where commands come from
+ * @ring:      HW engine that will execute the commands
+ *
+ * Return:     0 if succeed
+ */
+int i915_guc_submit(struct i915_guc_client *client,
+                   struct drm_i915_gem_request *rq)
+{
+       struct intel_guc *guc = client->guc;
+       enum intel_ring_id ring_id = rq->ring->id;
+       unsigned long flags;
+       int q_ret, b_ret;
+
+       /* Need this because of the deferred pin ctx and ring */
+       /* Shall we move this right after ring is pinned? */
+       lr_context_update(rq);
+
+       spin_lock_irqsave(&client->wq_lock, flags);
+
+       q_ret = guc_add_workqueue_item(client, rq);
+       if (q_ret == 0)
+               b_ret = guc_ring_doorbell(client);
+
+       client->submissions[ring_id] += 1;
+       if (q_ret) {
+               client->q_fail += 1;
+               client->retcode = q_ret;
+       } else if (b_ret) {
+               client->b_fail += 1;
+               client->retcode = q_ret = b_ret;
+       } else {
+               client->retcode = 0;
+       }
+       spin_unlock_irqrestore(&client->wq_lock, flags);
+
+       spin_lock(&guc->host2guc_lock);
+       guc->submissions[ring_id] += 1;
+       guc->last_seqno[ring_id] = rq->seqno;
+       spin_unlock(&guc->host2guc_lock);
+
+       return q_ret;
+}
+
+/*
+ * Everything below here is concerned with setup & teardown, and is
+ * therefore not part of the somewhat time-critical batch-submission
+ * path of i915_guc_submit() above.
+ */
+
+/**
+ * gem_allocate_guc_obj() - Allocate gem object for GuC usage
+ * @dev:       drm device
+ * @size:      size of object
+ *
+ * This is a wrapper to create a gem obj. In order to use it inside GuC, the
+ * object needs to stay pinned for its whole lifetime. Also we must pin it to gtt space other
+ * than [0, GUC_WOPCM_TOP) because this range is reserved inside GuC.
+ *
+ * Return:     A drm_i915_gem_object if successful, otherwise NULL.
+ */
+static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
+                                                       u32 size)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj;
+
+       obj = i915_gem_alloc_object(dev, size);
+       if (!obj)
+               return NULL;
+
+       if (i915_gem_object_get_pages(obj)) {
+               drm_gem_object_unreference(&obj->base);
+               return NULL;
+       }
+
+       if (i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
+                       PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
+               drm_gem_object_unreference(&obj->base);
+               return NULL;
+       }
+
+       /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
+       I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+
+       return obj;
+}
+
+/**
+ * gem_release_guc_obj() - Release gem object allocated for GuC usage
+ * @obj:       gem obj to be released
+  */
+static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
+{
+       if (!obj)
+               return;
+
+       if (i915_gem_obj_is_pinned(obj))
+               i915_gem_object_ggtt_unpin(obj);
+
+       drm_gem_object_unreference(&obj->base);
+}
+
+static void guc_client_free(struct drm_device *dev,
+                           struct i915_guc_client *client)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_guc *guc = &dev_priv->guc;
+
+       if (!client)
+               return;
+
+       if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
+               /*
+                * First disable the doorbell, then tell the GuC we've
+                * finished with it, finally deallocate it in our bitmap
+                */
+               guc_disable_doorbell(guc, client);
+               host2guc_release_doorbell(guc, client);
+               release_doorbell(guc, client->doorbell_id);
+       }
+
+       /*
+        * XXX: wait for any outstanding submissions before freeing memory.
+        * Be sure to drop any locks
+        */
+
+       gem_release_guc_obj(client->client_obj);
+
+       if (client->ctx_index != GUC_INVALID_CTX_ID) {
+               guc_fini_ctx_desc(guc, client);
+               ida_simple_remove(&guc->ctx_ids, client->ctx_index);
+       }
+
+       kfree(client);
+}
+
+/**
+ * guc_client_alloc() - Allocate an i915_guc_client
+ * @dev:       drm device
+ * @priority:  four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW
+ *             The kernel client to replace ExecList submission is created with
+ *             NORMAL priority. Priority of a client for scheduler can be HIGH,
+ *             while a preemption context can use CRITICAL.
+ * @ctx:       the context to own the client (we use the default render context)
+ *
+ * Return:     An i915_guc_client object if success.
+ */
+static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
+                                               uint32_t priority,
+                                               struct intel_context *ctx)
+{
+       struct i915_guc_client *client;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_guc *guc = &dev_priv->guc;
+       struct drm_i915_gem_object *obj;
+
+       client = kzalloc(sizeof(*client), GFP_KERNEL);
+       if (!client)
+               return NULL;
+
+       client->doorbell_id = GUC_INVALID_DOORBELL_ID;
+       client->priority = priority;
+       client->owner = ctx;
+       client->guc = guc;
+
+       client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0,
+                       GUC_MAX_GPU_CONTEXTS, GFP_KERNEL);
+       if (client->ctx_index >= GUC_MAX_GPU_CONTEXTS) {
+               client->ctx_index = GUC_INVALID_CTX_ID;
+               goto err;
+       }
+
+       /* The first page is doorbell/proc_desc. Two followed pages are wq. */
+       obj = gem_allocate_guc_obj(dev, GUC_DB_SIZE + GUC_WQ_SIZE);
+       if (!obj)
+               goto err;
+
+       client->client_obj = obj;
+       client->wq_offset = GUC_DB_SIZE;
+       client->wq_size = GUC_WQ_SIZE;
+       spin_lock_init(&client->wq_lock);
+
+       client->doorbell_offset = select_doorbell_cacheline(guc);
+
+       /*
+        * Since the doorbell only requires a single cacheline, we can save
+        * space by putting the application process descriptor in the same
+        * page. Use the half of the page that doesn't include the doorbell.
+        */
+       if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
+               client->proc_desc_offset = 0;
+       else
+               client->proc_desc_offset = (GUC_DB_SIZE / 2);
+
+       client->doorbell_id = assign_doorbell(guc, client->priority);
+       if (client->doorbell_id == GUC_INVALID_DOORBELL_ID)
+               /* XXX: evict a doorbell instead */
+               goto err;
+
+       guc_init_proc_desc(guc, client);
+       guc_init_ctx_desc(guc, client);
+       guc_init_doorbell(guc, client);
+
+       /* XXX: Any cache flushes needed? General domain mgmt calls? */
+
+       if (host2guc_allocate_doorbell(guc, client))
+               goto err;
+
+       DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u db_id %u\n",
+               priority, client, client->ctx_index, client->doorbell_id);
+
+       return client;
+
+err:
+       DRM_ERROR("FAILED to create priority %u GuC client!\n", priority);
+
+       guc_client_free(dev, client);
+       return NULL;
+}
+
+static void guc_create_log(struct intel_guc *guc)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       struct drm_i915_gem_object *obj;
+       unsigned long offset;
+       uint32_t size, flags;
+
+       if (i915.guc_log_level < GUC_LOG_VERBOSITY_MIN)
+               return;
+
+       if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
+               i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
+
+       /* The first page is to save log buffer state. Allocate one
+        * extra page for others in case for overlap */
+       size = (1 + GUC_LOG_DPC_PAGES + 1 +
+               GUC_LOG_ISR_PAGES + 1 +
+               GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
+
+       obj = guc->log_obj;
+       if (!obj) {
+               obj = gem_allocate_guc_obj(dev_priv->dev, size);
+               if (!obj) {
+                       /* logging will be off */
+                       i915.guc_log_level = -1;
+                       return;
+               }
+
+               guc->log_obj = obj;
+       }
+
+       /* each allocated unit is a page */
+       flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
+               (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
+               (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
+               (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
+
+       offset = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; /* in pages */
+       guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+}
+
+/*
+ * Set up the memory resources to be shared with the GuC.  At this point,
+ * we require just one object that can be mapped through the GGTT.
+ */
+int i915_guc_submission_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       const size_t ctxsize = sizeof(struct guc_context_desc);
+       const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
+       const size_t gemsize = round_up(poolsize, PAGE_SIZE);
+       struct intel_guc *guc = &dev_priv->guc;
+
+       if (!i915.enable_guc_submission)
+               return 0; /* not enabled  */
+
+       if (guc->ctx_pool_obj)
+               return 0; /* already allocated */
+
+       guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv->dev, gemsize);
+       if (!guc->ctx_pool_obj)
+               return -ENOMEM;
+
+       spin_lock_init(&dev_priv->guc.host2guc_lock);
+
+       ida_init(&guc->ctx_ids);
+
+       guc_create_log(guc);
+
+       return 0;
+}
+
+int i915_guc_submission_enable(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_guc *guc = &dev_priv->guc;
+       struct intel_context *ctx = dev_priv->ring[RCS].default_context;
+       struct i915_guc_client *client;
+
+       /* client for execbuf submission */
+       client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx);
+       if (!client) {
+               DRM_ERROR("Failed to create execbuf guc_client\n");
+               return -ENOMEM;
+       }
+
+       guc->execbuf_client = client;
+
+       host2guc_sample_forcewake(guc, client);
+
+       return 0;
+}
+
+void i915_guc_submission_disable(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_guc *guc = &dev_priv->guc;
+
+       guc_client_free(dev, guc->execbuf_client);
+       guc->execbuf_client = NULL;
+}
+
+void i915_guc_submission_fini(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_guc *guc = &dev_priv->guc;
+
+       gem_release_guc_obj(dev_priv->guc.log_obj);
+       guc->log_obj = NULL;
+
+       if (guc->ctx_pool_obj)
+               ida_destroy(&guc->ctx_ids);
+       gem_release_guc_obj(guc->ctx_pool_obj);
+       guc->ctx_pool_obj = NULL;
+}
index 39d73dbc1c4774857a96b7dedf0fe2d156177759..45086e15459ac14943e994c9bc65884b9a61cee8 100644 (file)
  * and related files, but that will be described in separate chapters.
  */
 
+static const u32 hpd_ilk[HPD_NUM_PINS] = {
+       [HPD_PORT_A] = DE_DP_A_HOTPLUG,
+};
+
+static const u32 hpd_ivb[HPD_NUM_PINS] = {
+       [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
+};
+
+static const u32 hpd_bdw[HPD_NUM_PINS] = {
+       [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
+};
+
 static const u32 hpd_ibx[HPD_NUM_PINS] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
@@ -62,6 +74,7 @@ static const u32 hpd_cpt[HPD_NUM_PINS] = {
 };
 
 static const u32 hpd_spt[HPD_NUM_PINS] = {
+       [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
@@ -97,6 +110,7 @@ static const u32 hpd_status_i915[HPD_NUM_PINS] = {
 
 /* BXT hpd list */
 static const u32 hpd_bxt[HPD_NUM_PINS] = {
+       [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
        [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
        [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
 };
@@ -154,36 +168,85 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
 
 /* For display hotplug interrupt */
-void
-ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
+static inline void
+i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
+                                    uint32_t mask,
+                                    uint32_t bits)
 {
+       uint32_t val;
+
        assert_spin_locked(&dev_priv->irq_lock);
+       WARN_ON(bits & ~mask);
 
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return;
+       val = I915_READ(PORT_HOTPLUG_EN);
+       val &= ~mask;
+       val |= bits;
+       I915_WRITE(PORT_HOTPLUG_EN, val);
+}
 
-       if ((dev_priv->irq_mask & mask) != 0) {
-               dev_priv->irq_mask &= ~mask;
-               I915_WRITE(DEIMR, dev_priv->irq_mask);
-               POSTING_READ(DEIMR);
-       }
+/**
+ * i915_hotplug_interrupt_update - update hotplug interrupt enable
+ * @dev_priv: driver private
+ * @mask: bits to update
+ * @bits: bits to enable
+ * NOTE: the HPD enable bits are modified both inside and outside
+ * of an interrupt context. To avoid that read-modify-write cycles
+ * interfere, these bits are protected by a spinlock. Since this
+ * function is usually not called from a context where the lock is
+ * held already, this function acquires the lock itself. A non-locking
+ * version is also available.
+ */
+void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
+                                  uint32_t mask,
+                                  uint32_t bits)
+{
+       spin_lock_irq(&dev_priv->irq_lock);
+       i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
+       spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-void
-ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
+/**
+ * ilk_update_display_irq - update DEIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+                                  uint32_t interrupt_mask,
+                                  uint32_t enabled_irq_mask)
 {
+       uint32_t new_val;
+
        assert_spin_locked(&dev_priv->irq_lock);
 
+       WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
                return;
 
-       if ((dev_priv->irq_mask & mask) != mask) {
-               dev_priv->irq_mask |= mask;
+       new_val = dev_priv->irq_mask;
+       new_val &= ~interrupt_mask;
+       new_val |= (~enabled_irq_mask & interrupt_mask);
+
+       if (new_val != dev_priv->irq_mask) {
+               dev_priv->irq_mask = new_val;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
 }
 
+void
+ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
+{
+       ilk_update_display_irq(dev_priv, mask, mask);
+}
+
+void
+ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
+{
+       ilk_update_display_irq(dev_priv, mask, 0);
+}
+
 /**
  * ilk_update_gt_irq - update GTIMR
  * @dev_priv: driver private
@@ -350,6 +413,38 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
        synchronize_irq(dev->irq);
 }
 
+/**
+ * bdw_update_port_irq - update DE port interrupt
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
+                               uint32_t interrupt_mask,
+                               uint32_t enabled_irq_mask)
+{
+       uint32_t new_val;
+       uint32_t old_val;
+
+       assert_spin_locked(&dev_priv->irq_lock);
+
+       WARN_ON(enabled_irq_mask & ~interrupt_mask);
+
+       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+               return;
+
+       old_val = I915_READ(GEN8_DE_PORT_IMR);
+
+       new_val = old_val;
+       new_val &= ~interrupt_mask;
+       new_val |= (~enabled_irq_mask & interrupt_mask);
+
+       if (new_val != old_val) {
+               I915_WRITE(GEN8_DE_PORT_IMR, new_val);
+               POSTING_READ(GEN8_DE_PORT_IMR);
+       }
+}
+
 /**
  * ibx_display_interrupt_update - update SDEIMR
  * @dev_priv: driver private
@@ -554,7 +649,7 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
  *   of horizontal active on the first line of vertical active
  */
 
-static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 {
        /* Gen2 doesn't have a hardware frame counter */
        return 0;
@@ -563,7 +658,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
-static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long high_frame;
@@ -611,7 +706,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
        return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
 }
 
-static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 gm45_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);
@@ -672,14 +767,14 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
        return (position + crtc->scanline_offset) % vtotal;
 }
 
-static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
                                    unsigned int flags, int *vpos, int *hpos,
-                                   ktime_t *stime, ktime_t *etime)
+                                   ktime_t *stime, ktime_t *etime,
+                                   const struct drm_display_mode *mode)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        bool in_vbl = true;
@@ -809,34 +904,33 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
        return position;
 }
 
-static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
+static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
                              int *max_error,
                              struct timeval *vblank_time,
                              unsigned flags)
 {
        struct drm_crtc *crtc;
 
-       if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
-               DRM_ERROR("Invalid crtc %d\n", pipe);
+       if (pipe >= INTEL_INFO(dev)->num_pipes) {
+               DRM_ERROR("Invalid crtc %u\n", pipe);
                return -EINVAL;
        }
 
        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev, pipe);
        if (crtc == NULL) {
-               DRM_ERROR("Invalid crtc %d\n", pipe);
+               DRM_ERROR("Invalid crtc %u\n", pipe);
                return -EINVAL;
        }
 
        if (!crtc->hwmode.crtc_clock) {
-               DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+               DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
                return -EBUSY;
        }
 
        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
-                                                    crtc,
                                                     &crtc->hwmode);
 }
 
@@ -1264,7 +1358,31 @@ static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
 {
        switch (port) {
        case PORT_A:
-               return val & BXT_PORTA_HOTPLUG_LONG_DETECT;
+               return val & PORTA_HOTPLUG_LONG_DETECT;
+       case PORT_B:
+               return val & PORTB_HOTPLUG_LONG_DETECT;
+       case PORT_C:
+               return val & PORTC_HOTPLUG_LONG_DETECT;
+       default:
+               return false;
+       }
+}
+
+static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
+{
+       switch (port) {
+       case PORT_E:
+               return val & PORTE_HOTPLUG_LONG_DETECT;
+       default:
+               return false;
+       }
+}
+
+static bool spt_port_hotplug_long_detect(enum port port, u32 val)
+{
+       switch (port) {
+       case PORT_A:
+               return val & PORTA_HOTPLUG_LONG_DETECT;
        case PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
        case PORT_C:
@@ -1276,6 +1394,16 @@ static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
        }
 }
 
+static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
+{
+       switch (port) {
+       case PORT_A:
+               return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
+       default:
+               return false;
+       }
+}
+
 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
 {
        switch (port) {
@@ -1285,8 +1413,6 @@ static bool pch_port_hotplug_long_detect(enum port port, u32 val)
                return val & PORTC_HOTPLUG_LONG_DETECT;
        case PORT_D:
                return val & PORTD_HOTPLUG_LONG_DETECT;
-       case PORT_E:
-               return val & PORTE_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
@@ -1306,7 +1432,13 @@ static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
        }
 }
 
-/* Get a bit mask of pins that have triggered, and which ones may be long. */
+/*
+ * Get a bit mask of pins that have triggered, and which ones may be long.
+ * This can be called multiple times with the same masks to accumulate
+ * hotplug detection results from several registers.
+ *
+ * Note that the caller is expected to zero out the masks initially.
+ */
 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
                             u32 hotplug_trigger, u32 dig_hotplug_reg,
                             const u32 hpd[HPD_NUM_PINS],
@@ -1315,9 +1447,6 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
        enum port port;
        int i;
 
-       *pin_mask = 0;
-       *long_mask = 0;
-
        for_each_hpd_pin(i) {
                if ((hpd[i] & hotplug_trigger) == 0)
                        continue;
@@ -1558,7 +1687,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
-       u32 pin_mask, long_mask;
+       u32 pin_mask = 0, long_mask = 0;
 
        if (!hotplug_status)
                return;
@@ -1573,20 +1702,25 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
        if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
                u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
 
-               intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
-                                  hotplug_trigger, hpd_status_g4x,
-                                  i9xx_port_hotplug_long_detect);
-               intel_hpd_irq_handler(dev, pin_mask, long_mask);
+               if (hotplug_trigger) {
+                       intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+                                          hotplug_trigger, hpd_status_g4x,
+                                          i9xx_port_hotplug_long_detect);
+
+                       intel_hpd_irq_handler(dev, pin_mask, long_mask);
+               }
 
                if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
                        dp_aux_irq_handler(dev);
        } else {
                u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
 
-               intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
-                                  hotplug_trigger, hpd_status_i915,
-                                  i9xx_port_hotplug_long_detect);
-               intel_hpd_irq_handler(dev, pin_mask, long_mask);
+               if (hotplug_trigger) {
+                       intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+                                          hotplug_trigger, hpd_status_i915,
+                                          i9xx_port_hotplug_long_detect);
+                       intel_hpd_irq_handler(dev, pin_mask, long_mask);
+               }
        }
 }
 
@@ -1680,23 +1814,30 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
        return ret;
 }
 
+static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+                               const u32 hpd[HPD_NUM_PINS])
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
+
+       dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+       I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+       intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+                          dig_hotplug_reg, hpd,
+                          pch_port_hotplug_long_detect);
+
+       intel_hpd_irq_handler(dev, pin_mask, long_mask);
+}
+
 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
 
-       if (hotplug_trigger) {
-               u32 dig_hotplug_reg, pin_mask, long_mask;
-
-               dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
-               I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
-
-               intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
-                                  dig_hotplug_reg, hpd_ibx,
-                                  pch_port_hotplug_long_detect);
-               intel_hpd_irq_handler(dev, pin_mask, long_mask);
-       }
+       if (hotplug_trigger)
+               ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
 
        if (pch_iir & SDE_AUDIO_POWER_MASK) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1787,38 +1928,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
-       u32 hotplug_trigger;
+       u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
 
-       if (HAS_PCH_SPT(dev))
-               hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT;
-       else
-               hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
-
-       if (hotplug_trigger) {
-               u32 dig_hotplug_reg, pin_mask, long_mask;
-
-               dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
-               I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
-
-               if (HAS_PCH_SPT(dev)) {
-                       intel_get_hpd_pins(&pin_mask, &long_mask,
-                                          hotplug_trigger,
-                                          dig_hotplug_reg, hpd_spt,
-                                          pch_port_hotplug_long_detect);
-
-                       /* detect PORTE HP event */
-                       dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
-                       if (pch_port_hotplug_long_detect(PORT_E,
-                                                        dig_hotplug_reg))
-                               long_mask |= 1 << HPD_PORT_E;
-               } else
-                       intel_get_hpd_pins(&pin_mask, &long_mask,
-                                          hotplug_trigger,
-                                          dig_hotplug_reg, hpd_cpt,
-                                          pch_port_hotplug_long_detect);
-
-               intel_hpd_irq_handler(dev, pin_mask, long_mask);
-       }
+       if (hotplug_trigger)
+               ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
 
        if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -1849,10 +1962,67 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
                cpt_serr_int_handler(dev);
 }
 
+static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
+               ~SDE_PORTE_HOTPLUG_SPT;
+       u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
+       u32 pin_mask = 0, long_mask = 0;
+
+       if (hotplug_trigger) {
+               u32 dig_hotplug_reg;
+
+               dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+               I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+               intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+                                  dig_hotplug_reg, hpd_spt,
+                                  spt_port_hotplug_long_detect);
+       }
+
+       if (hotplug2_trigger) {
+               u32 dig_hotplug_reg;
+
+               dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
+               I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
+
+               intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
+                                  dig_hotplug_reg, hpd_spt,
+                                  spt_port_hotplug2_long_detect);
+       }
+
+       if (pin_mask)
+               intel_hpd_irq_handler(dev, pin_mask, long_mask);
+
+       if (pch_iir & SDE_GMBUS_CPT)
+               gmbus_irq_handler(dev);
+}
+
+static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+                               const u32 hpd[HPD_NUM_PINS])
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
+
+       dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
+       I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
+
+       intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+                          dig_hotplug_reg, hpd,
+                          ilk_port_hotplug_long_detect);
+
+       intel_hpd_irq_handler(dev, pin_mask, long_mask);
+}
+
 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
+       u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
+
+       if (hotplug_trigger)
+               ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
 
        if (de_iir & DE_AUX_CHANNEL_A)
                dp_aux_irq_handler(dev);
@@ -1902,6 +2072,10 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
+       u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
+
+       if (hotplug_trigger)
+               ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
 
        if (de_iir & DE_ERR_INT_IVB)
                ivb_err_int_handler(dev);
@@ -2014,27 +2188,19 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        return ret;
 }
 
-static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
+static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+                               const u32 hpd[HPD_NUM_PINS])
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 hp_control, hp_trigger;
-       u32 pin_mask, long_mask;
-
-       /* Get the status */
-       hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
-       hp_control = I915_READ(BXT_HOTPLUG_CTL);
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
 
-       /* Hotplug not enabled ? */
-       if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) {
-               DRM_ERROR("Interrupt when HPD disabled\n");
-               return;
-       }
+       dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+       I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
 
-       /* Clear sticky bits in hpd status */
-       I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
+       intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+                          dig_hotplug_reg, hpd,
+                          bxt_port_hotplug_long_detect);
 
-       intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
-                          hpd_bxt, bxt_port_hotplug_long_detect);
        intel_hpd_irq_handler(dev, pin_mask, long_mask);
 }
 
@@ -2051,7 +2217,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
        if (!intel_irqs_enabled(dev_priv))
                return IRQ_NONE;
 
-       if (IS_GEN9(dev))
+       if (INTEL_INFO(dev_priv)->gen >= 9)
                aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
                        GEN9_AUX_CHANNEL_D;
 
@@ -2084,6 +2250,12 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
                tmp = I915_READ(GEN8_DE_PORT_IIR);
                if (tmp) {
                        bool found = false;
+                       u32 hotplug_trigger = 0;
+
+                       if (IS_BROXTON(dev_priv))
+                               hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
+                       else if (IS_BROADWELL(dev_priv))
+                               hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
 
                        I915_WRITE(GEN8_DE_PORT_IIR, tmp);
                        ret = IRQ_HANDLED;
@@ -2093,8 +2265,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
                                found = true;
                        }
 
-                       if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) {
-                               bxt_hpd_handler(dev, tmp);
+                       if (hotplug_trigger) {
+                               if (IS_BROXTON(dev))
+                                       bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
+                               else
+                                       ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
                                found = true;
                        }
 
@@ -2125,7 +2300,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
                            intel_pipe_handle_vblank(dev, pipe))
                                intel_check_page_flip(dev, pipe);
 
-                       if (IS_GEN9(dev))
+                       if (INTEL_INFO(dev_priv)->gen >= 9)
                                flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
                        else
                                flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
@@ -2143,7 +2318,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
                                                                    pipe);
 
 
-                       if (IS_GEN9(dev))
+                       if (INTEL_INFO(dev_priv)->gen >= 9)
                                fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
                        else
                                fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
@@ -2167,7 +2342,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
                if (pch_iir) {
                        I915_WRITE(SDEIIR, pch_iir);
                        ret = IRQ_HANDLED;
-                       cpt_irq_handler(dev, pch_iir);
+
+                       if (HAS_PCH_SPT(dev_priv))
+                               spt_irq_handler(dev, pch_iir);
+                       else
+                               cpt_irq_handler(dev, pch_iir);
                } else
                        DRM_ERROR("The master control interrupt lied (SDE)!\n");
 
@@ -2432,7 +2611,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
-static int i915_enable_vblank(struct drm_device *dev, int pipe)
+static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
@@ -2449,7 +2628,7 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
        return 0;
 }
 
-static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
+static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
@@ -2463,7 +2642,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
        return 0;
 }
 
-static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
+static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
@@ -2476,7 +2655,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
        return 0;
 }
 
-static int gen8_enable_vblank(struct drm_device *dev, int pipe)
+static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
@@ -2492,7 +2671,7 @@ static int gen8_enable_vblank(struct drm_device *dev, int pipe)
 /* Called from drm generic code, passed 'crtc' which
  * we use as a pipe index
  */
-static void i915_disable_vblank(struct drm_device *dev, int pipe)
+static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
@@ -2504,7 +2683,7 @@ static void i915_disable_vblank(struct drm_device *dev, int pipe)
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
+static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
@@ -2516,7 +2695,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
+static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
@@ -2527,7 +2706,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-static void gen8_disable_vblank(struct drm_device *dev, int pipe)
+static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
@@ -2933,7 +3112,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
 {
        enum pipe pipe;
 
-       I915_WRITE(PORT_HOTPLUG_EN, 0);
+       i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
        for_each_pipe(dev_priv, pipe)
@@ -3027,86 +3206,124 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
        vlv_display_irq_reset(dev_priv);
 }
 
+static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
+                                 const u32 hpd[HPD_NUM_PINS])
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_encoder *encoder;
+       u32 enabled_irqs = 0;
+
+       for_each_intel_encoder(dev, encoder)
+               if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
+                       enabled_irqs |= hpd[encoder->hpd_pin];
+
+       return enabled_irqs;
+}
+
 static void ibx_hpd_irq_setup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *intel_encoder;
-       u32 hotplug_irqs, hotplug, enabled_irqs = 0;
+       u32 hotplug_irqs, hotplug, enabled_irqs;
 
        if (HAS_PCH_IBX(dev)) {
                hotplug_irqs = SDE_HOTPLUG_MASK;
-               for_each_intel_encoder(dev, intel_encoder)
-                       if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
-                               enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
-       } else if (HAS_PCH_SPT(dev)) {
-               hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
-               for_each_intel_encoder(dev, intel_encoder)
-                       if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
-                               enabled_irqs |= hpd_spt[intel_encoder->hpd_pin];
+               enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
        } else {
                hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
-               for_each_intel_encoder(dev, intel_encoder)
-                       if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
-                               enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
+               enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
        }
 
        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
 
        /*
         * Enable digital hotplug on the PCH, and configure the DP short pulse
-        * duration to 2ms (which is the minimum in the Display Port spec)
-        *
-        * This register is the same on all known PCH chips.
+        * duration to 2ms (which is the minimum in the Display Port spec).
+        * The pulse duration bits are reserved on LPT+.
         */
        hotplug = I915_READ(PCH_PORT_HOTPLUG);
        hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
        hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
        hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
        hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
+       /*
+        * When CPU and PCH are on the same package, port A
+        * HPD must be enabled in both north and south.
+        */
+       if (HAS_PCH_LPT_LP(dev))
+               hotplug |= PORTA_HOTPLUG_ENABLE;
        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+}
 
-       /* enable SPT PORTE hot plug */
-       if (HAS_PCH_SPT(dev)) {
-               hotplug = I915_READ(PCH_PORT_HOTPLUG2);
-               hotplug |= PORTE_HOTPLUG_ENABLE;
-               I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
-       }
+static void spt_hpd_irq_setup(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 hotplug_irqs, hotplug, enabled_irqs;
+
+       hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
+       enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
+
+       ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+       /* Enable digital hotplug on the PCH */
+       hotplug = I915_READ(PCH_PORT_HOTPLUG);
+       hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
+               PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
+       I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+
+       hotplug = I915_READ(PCH_PORT_HOTPLUG2);
+       hotplug |= PORTE_HOTPLUG_ENABLE;
+       I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
 }
 
-static void bxt_hpd_irq_setup(struct drm_device *dev)
+static void ilk_hpd_irq_setup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *intel_encoder;
-       u32 hotplug_port = 0;
-       u32 hotplug_ctrl;
-
-       /* Now, enable HPD */
-       for_each_intel_encoder(dev, intel_encoder) {
-               if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state
-                               == HPD_ENABLED)
-                       hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
+       u32 hotplug_irqs, hotplug, enabled_irqs;
+
+       if (INTEL_INFO(dev)->gen >= 8) {
+               hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
+               enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
+
+               bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+       } else if (INTEL_INFO(dev)->gen >= 7) {
+               hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
+               enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
+
+               ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
+       } else {
+               hotplug_irqs = DE_DP_A_HOTPLUG;
+               enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
+
+               ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
        }
 
-       /* Mask all HPD control bits */
-       hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK;
+       /*
+        * Enable digital hotplug on the CPU, and configure the DP short pulse
+        * duration to 2ms (which is the minimum in the Display Port spec)
+        * The pulse duration bits are reserved on HSW+.
+        */
+       hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
+       hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
+       hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
+       I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
+
+       ibx_hpd_irq_setup(dev);
+}
+
+static void bxt_hpd_irq_setup(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 hotplug_irqs, hotplug, enabled_irqs;
 
-       /* Enable requested port in hotplug control */
-       /* TODO: implement (short) HPD support on port A */
-       WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA);
-       if (hotplug_port & BXT_DE_PORT_HP_DDIB)
-               hotplug_ctrl |= BXT_DDIB_HPD_ENABLE;
-       if (hotplug_port & BXT_DE_PORT_HP_DDIC)
-               hotplug_ctrl |= BXT_DDIC_HPD_ENABLE;
-       I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl);
+       enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
+       hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
 
-       /* Unmask DDI hotplug in IMR */
-       hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port;
-       I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl);
+       bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
 
-       /* Enable DDI hotplug in IER */
-       hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port;
-       I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl);
-       POSTING_READ(GEN8_DE_PORT_IER);
+       hotplug = I915_READ(PCH_PORT_HOTPLUG);
+       hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
+               PORTA_HOTPLUG_ENABLE;
+       I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
 }
 
 static void ibx_irq_postinstall(struct drm_device *dev)
@@ -3174,15 +3391,17 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
                                DE_PLANEB_FLIP_DONE_IVB |
                                DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
                extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
-                             DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
+                             DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
+                             DE_DP_A_HOTPLUG_IVB);
        } else {
                display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
                                DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
                                DE_AUX_CHANNEL_A |
                                DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
                                DE_POISON);
-               extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
-                               DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
+               extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
+                             DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
+                             DE_DP_A_HOTPLUG);
        }
 
        dev_priv->irq_mask = ~display_mask;
@@ -3309,7 +3528,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
 {
        dev_priv->irq_mask = ~0;
 
-       I915_WRITE(PORT_HOTPLUG_EN, 0);
+       i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
        POSTING_READ(PORT_HOTPLUG_EN);
 
        I915_WRITE(VLV_IIR, 0xffffffff);
@@ -3378,24 +3597,31 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 {
        uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
        uint32_t de_pipe_enables;
-       int pipe;
-       u32 de_port_en = GEN8_AUX_CHANNEL_A;
+       u32 de_port_masked = GEN8_AUX_CHANNEL_A;
+       u32 de_port_enables;
+       enum pipe pipe;
 
-       if (IS_GEN9(dev_priv)) {
+       if (INTEL_INFO(dev_priv)->gen >= 9) {
                de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
                                  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
-               de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
-                       GEN9_AUX_CHANNEL_D;
-
+               de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
+                                 GEN9_AUX_CHANNEL_D;
                if (IS_BROXTON(dev_priv))
-                       de_port_en |= BXT_DE_PORT_GMBUS;
-       } else
+                       de_port_masked |= BXT_DE_PORT_GMBUS;
+       } else {
                de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
                                  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+       }
 
        de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
                                           GEN8_PIPE_FIFO_UNDERRUN;
 
+       de_port_enables = de_port_masked;
+       if (IS_BROXTON(dev_priv))
+               de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
+       else if (IS_BROADWELL(dev_priv))
+               de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
+
        dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
        dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
        dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
@@ -3407,7 +3633,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
                                          dev_priv->de_irq_mask[pipe],
                                          de_pipe_enables);
 
-       GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en);
+       GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
 }
 
 static int gen8_irq_postinstall(struct drm_device *dev)
@@ -3676,7 +3902,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
        int pipe;
 
        if (I915_HAS_HOTPLUG(dev)) {
-               I915_WRITE(PORT_HOTPLUG_EN, 0);
+               i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        }
 
@@ -3710,7 +3936,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
                I915_USER_INTERRUPT;
 
        if (I915_HAS_HOTPLUG(dev)) {
-               I915_WRITE(PORT_HOTPLUG_EN, 0);
+               i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
                POSTING_READ(PORT_HOTPLUG_EN);
 
                /* Enable in IER... */
@@ -3872,7 +4098,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
        int pipe;
 
        if (I915_HAS_HOTPLUG(dev)) {
-               I915_WRITE(PORT_HOTPLUG_EN, 0);
+               i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
                I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
        }
 
@@ -3893,7 +4119,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
 
-       I915_WRITE(PORT_HOTPLUG_EN, 0);
+       i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
        I915_WRITE(HWSTAM, 0xeffe);
@@ -3954,7 +4180,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
        I915_WRITE(IER, enable_mask);
        POSTING_READ(IER);
 
-       I915_WRITE(PORT_HOTPLUG_EN, 0);
+       i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
        POSTING_READ(PORT_HOTPLUG_EN);
 
        i915_enable_asle_pipestat(dev);
@@ -3965,29 +4191,26 @@ static int i965_irq_postinstall(struct drm_device *dev)
 static void i915_hpd_irq_setup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *intel_encoder;
        u32 hotplug_en;
 
        assert_spin_locked(&dev_priv->irq_lock);
 
-       hotplug_en = I915_READ(PORT_HOTPLUG_EN);
-       hotplug_en &= ~HOTPLUG_INT_EN_MASK;
        /* Note HDMI and DP share hotplug bits */
        /* enable bits are the same for all generations */
-       for_each_intel_encoder(dev, intel_encoder)
-               if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
-                       hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
+       hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
        /* Programming the CRT detection parameters tends
           to generate a spurious hotplug event about three
           seconds later.  So just do it once.
        */
        if (IS_G4X(dev))
                hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
-       hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
        hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
 
        /* Ignore TV since it's buggy */
-       I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+       i915_hotplug_interrupt_update_locked(dev_priv,
+                                     (HOTPLUG_INT_EN_MASK
+                                      | CRT_HOTPLUG_VOLTAGE_COMPARE_MASK),
+                                     hotplug_en);
 }
 
 static irqreturn_t i965_irq_handler(int irq, void *arg)
@@ -4100,7 +4323,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
        if (!dev_priv)
                return;
 
-       I915_WRITE(PORT_HOTPLUG_EN, 0);
+       i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
        I915_WRITE(HWSTAM, 0xffffffff);
@@ -4188,10 +4411,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
                dev->driver->irq_uninstall = gen8_irq_uninstall;
                dev->driver->enable_vblank = gen8_enable_vblank;
                dev->driver->disable_vblank = gen8_disable_vblank;
-               if (HAS_PCH_SPLIT(dev))
-                       dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
-               else
+               if (IS_BROXTON(dev))
                        dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
+               else if (HAS_PCH_SPT(dev))
+                       dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
+               else
+                       dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev->driver->irq_handler = ironlake_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_reset;
@@ -4199,7 +4424,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
                dev->driver->irq_uninstall = ironlake_irq_uninstall;
                dev->driver->enable_vblank = ironlake_enable_vblank;
                dev->driver->disable_vblank = ironlake_disable_vblank;
-               dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
+               dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
        } else {
                if (INTEL_INFO(dev_priv)->gen == 2) {
                        dev->driver->irq_preinstall = i8xx_irq_preinstall;
index 5ae4b0aba56412e3c7d8560c5fe1d1484ae00675..ca9b8f644ffea7100214505058833dcbff0d4d60 100644 (file)
@@ -40,7 +40,6 @@ struct i915_params i915 __read_mostly = {
        .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
        .disable_power_well = 1,
        .enable_ips = 1,
-       .fastboot = 0,
        .prefault_disable = 0,
        .load_detect_test = 0,
        .reset = true,
@@ -51,6 +50,7 @@ struct i915_params i915 __read_mostly = {
        .use_mmio_flip = 0,
        .mmio_debug = 0,
        .verbose_state_checks = 1,
+       .nuclear_pageflip = 0,
        .edp_vswing = 0,
        .enable_guc_submission = false,
        .guc_log_level = -1,
@@ -61,7 +61,7 @@ MODULE_PARM_DESC(modeset,
        "Use kernel modesetting [KMS] (0=disable, "
        "1=on, -1=force vga console preference [default])");
 
-module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
+module_param_named_unsafe(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
 MODULE_PARM_DESC(panel_ignore_lid,
        "Override lid status (0=autodetect, 1=autodetect disabled [default], "
        "-1=force lid closed, -2=force lid open)");
@@ -84,17 +84,17 @@ MODULE_PARM_DESC(enable_fbc,
        "Enable frame buffer compression for power savings "
        "(default: -1 (use per-chip default))");
 
-module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
+module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
 MODULE_PARM_DESC(lvds_channel_mode,
         "Specify LVDS channel mode "
         "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
 
-module_param_named(lvds_use_ssc, i915.panel_use_ssc, int, 0600);
+module_param_named_unsafe(lvds_use_ssc, i915.panel_use_ssc, int, 0600);
 MODULE_PARM_DESC(lvds_use_ssc,
        "Use Spread Spectrum Clock with panels [LVDS/eDP] "
        "(default: auto from VBT)");
 
-module_param_named(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
+module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
 MODULE_PARM_DESC(vbt_sdvo_panel_type,
        "Override/Ignore selection of SDVO panel mode in the VBT "
        "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
@@ -102,7 +102,7 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
 module_param_named_unsafe(reset, i915.reset, bool, 0600);
 MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
 
-module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
+module_param_named_unsafe(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
 MODULE_PARM_DESC(enable_hangcheck,
        "Periodically check GPU activity for detecting hangs. "
        "WARNING: Disabling this can cause system wide hangs. "
@@ -113,29 +113,25 @@ MODULE_PARM_DESC(enable_ppgtt,
        "Override PPGTT usage. "
        "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
 
-module_param_named(enable_execlists, i915.enable_execlists, int, 0400);
+module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400);
 MODULE_PARM_DESC(enable_execlists,
        "Override execlists usage. "
        "(-1=auto [default], 0=disabled, 1=enabled)");
 
-module_param_named(enable_psr, i915.enable_psr, int, 0600);
+module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600);
 MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
 
-module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
+module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
 MODULE_PARM_DESC(preliminary_hw_support,
        "Enable preliminary hardware support.");
 
-module_param_named(disable_power_well, i915.disable_power_well, int, 0600);
+module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0600);
 MODULE_PARM_DESC(disable_power_well,
        "Disable the power well when possible (default: true)");
 
-module_param_named(enable_ips, i915.enable_ips, int, 0600);
+module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600);
 MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
 
-module_param_named(fastboot, i915.fastboot, bool, 0600);
-MODULE_PARM_DESC(fastboot,
-       "Try to skip unnecessary mode sets at boot time (default: false)");
-
 module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
 MODULE_PARM_DESC(prefault_disable,
        "Disable page prefaulting for pread/pwrite/reloc (default:false). "
@@ -146,7 +142,7 @@ MODULE_PARM_DESC(load_detect_test,
        "Force-enable the VGA load detect code for testing (default:false). "
        "For developers only.");
 
-module_param_named(invert_brightness, i915.invert_brightness, int, 0600);
+module_param_named_unsafe(invert_brightness, i915.invert_brightness, int, 0600);
 MODULE_PARM_DESC(invert_brightness,
        "Invert backlight brightness "
        "(-1 force normal, 0 machine defaults, 1 force inversion), please "
@@ -157,14 +153,14 @@ MODULE_PARM_DESC(invert_brightness,
 module_param_named(disable_display, i915.disable_display, bool, 0600);
 MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
 
-module_param_named(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
+module_param_named_unsafe(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
 MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)");
 
-module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
+module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
 MODULE_PARM_DESC(enable_cmd_parser,
                 "Enable command parsing (1=enabled [default], 0=disabled)");
 
-module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600);
+module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600);
 MODULE_PARM_DESC(use_mmio_flip,
                 "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
 
@@ -177,6 +173,10 @@ module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
 MODULE_PARM_DESC(verbose_state_checks,
        "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
 
+module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
+MODULE_PARM_DESC(nuclear_pageflip,
+                "Force atomic modeset functionality; asynchronous mode is not yet supported. (default: false).");
+
 /* WA to get away with the default setting in VBT for early platforms.Will be removed */
 module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
 MODULE_PARM_DESC(edp_vswing,
index 83a0888756d68402af1c4211b51372edba53f19d..56157eb8719b91d7b396f76904bf7733a2e0cfae 100644 (file)
  */
 #define MI_LOAD_REGISTER_IMM(x)        MI_INSTR(0x22, 2*(x)-1)
 #define   MI_LRI_FORCE_POSTED          (1<<12)
-#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1)
-#define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1)
+#define MI_STORE_REGISTER_MEM        MI_INSTR(0x24, 1)
+#define MI_STORE_REGISTER_MEM_GEN8   MI_INSTR(0x24, 2)
 #define   MI_SRM_LRM_GLOBAL_GTT                (1<<22)
 #define MI_FLUSH_DW            MI_INSTR(0x26, 1) /* for GEN6 */
 #define   MI_FLUSH_DW_STORE_INDEX      (1<<21)
 #define   MI_INVALIDATE_BSD            (1<<7)
 #define   MI_FLUSH_DW_USE_GTT          (1<<2)
 #define   MI_FLUSH_DW_USE_PPGTT                (0<<2)
-#define MI_LOAD_REGISTER_MEM(x) MI_INSTR(0x29, 2*(x)-1)
-#define MI_LOAD_REGISTER_MEM_GEN8(x) MI_INSTR(0x29, 3*(x)-1)
+#define MI_LOAD_REGISTER_MEM      MI_INSTR(0x29, 1)
+#define MI_LOAD_REGISTER_MEM_GEN8  MI_INSTR(0x29, 2)
 #define MI_BATCH_BUFFER                MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE          (1)
 /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
@@ -1099,6 +1099,12 @@ enum skl_disp_power_wells {
 #define  DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE        1 /* 1: coarse & 0 : fine  */
 #define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1)
 
+#define _CHV_CMN_DW0_CH0               0x8100
+#define   DPIO_ALLDL_POWERDOWN_SHIFT_CH0       19
+#define   DPIO_ANYDL_POWERDOWN_SHIFT_CH0       18
+#define   DPIO_ALLDL_POWERDOWN                 (1 << 1)
+#define   DPIO_ANYDL_POWERDOWN                 (1 << 0)
+
 #define _CHV_CMN_DW5_CH0               0x8114
 #define   CHV_BUFRIGHTENA1_DISABLE     (0 << 20)
 #define   CHV_BUFRIGHTENA1_NORMAL      (1 << 20)
@@ -1135,10 +1141,23 @@ enum skl_disp_power_wells {
 
 #define _CHV_CMN_DW19_CH0              0x814c
 #define _CHV_CMN_DW6_CH1               0x8098
+#define   DPIO_ALLDL_POWERDOWN_SHIFT_CH1       30 /* CL2 DW6 only */
+#define   DPIO_ANYDL_POWERDOWN_SHIFT_CH1       29 /* CL2 DW6 only */
+#define   DPIO_DYNPWRDOWNEN_CH1                (1 << 28) /* CL2 DW6 only */
 #define   CHV_CMN_USEDCLKCHANNEL       (1 << 13)
+
 #define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1)
 
+#define CHV_CMN_DW28                   0x8170
+#define   DPIO_CL1POWERDOWNEN          (1 << 23)
+#define   DPIO_DYNPWRDOWNEN_CH0                (1 << 22)
+#define   DPIO_SUS_CLK_CONFIG_ON               (0 << 0)
+#define   DPIO_SUS_CLK_CONFIG_CLKREQ           (1 << 0)
+#define   DPIO_SUS_CLK_CONFIG_GATE             (2 << 0)
+#define   DPIO_SUS_CLK_CONFIG_GATE_CLKREQ      (3 << 0)
+
 #define CHV_CMN_DW30                   0x8178
+#define   DPIO_CL2_LDOFUSE_PWRENB      (1 << 6)
 #define   DPIO_LRC_BYPASS              (1 << 3)
 
 #define _TXLANE(ch, lane, offset) ((ch ? 0x2400 : 0) + \
@@ -1508,7 +1527,7 @@ enum skl_disp_power_wells {
 #define GEN7_GFX_PEND_TLB0     0x4034
 #define GEN7_GFX_PEND_TLB1     0x4038
 /* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
-#define GEN7_LRA_LIMITS_BASE   0x403C
+#define GEN7_LRA_LIMITS(i)     (0x403C + (i) * 4)
 #define GEN7_LRA_LIMITS_REG_NUM        13
 #define GEN7_MEDIA_MAX_REQ_COUNT       0x4070
 #define GEN7_GFX_MAX_REQ_COUNT         0x4074
@@ -1674,11 +1693,18 @@ enum skl_disp_power_wells {
 #define GFX_MODE_GEN7  0x0229c
 #define RING_MODE_GEN7(ring)   ((ring)->mmio_base+0x29c)
 #define   GFX_RUN_LIST_ENABLE          (1<<15)
+#define   GFX_INTERRUPT_STEERING       (1<<14)
 #define   GFX_TLB_INVALIDATE_EXPLICIT  (1<<13)
 #define   GFX_SURFACE_FAULT_ENABLE     (1<<12)
 #define   GFX_REPLAY_MODE              (1<<11)
 #define   GFX_PSMI_GRANULARITY         (1<<10)
 #define   GFX_PPGTT_ENABLE             (1<<9)
+#define   GEN8_GFX_PPGTT_48B           (1<<7)
+
+#define   GFX_FORWARD_VBLANK_MASK      (3<<5)
+#define   GFX_FORWARD_VBLANK_NEVER     (0<<5)
+#define   GFX_FORWARD_VBLANK_ALWAYS    (1<<5)
+#define   GFX_FORWARD_VBLANK_COND      (2<<5)
 
 #define VLV_DISPLAY_BASE 0x180000
 #define VLV_MIPI_BASE VLV_DISPLAY_BASE
@@ -1985,7 +2011,7 @@ enum skl_disp_power_wells {
 #define   FBC_CTL_CPU_FENCE    (1<<1)
 #define   FBC_CTL_PLANE(plane) ((plane)<<0)
 #define FBC_FENCE_OFF          0x03218 /* BSpec typo has 321Bh */
-#define FBC_TAG                        0x03300
+#define FBC_TAG(i)             (0x03300 + (i) * 4)
 
 #define FBC_STATUS2            0x43214
 #define  FBC_COMPRESSION_MASK  0x7ff
@@ -2185,16 +2211,20 @@ enum skl_disp_power_wells {
 #define DPIO_PHY_STATUS                        (VLV_DISPLAY_BASE + 0x6240)
 #define   DPLL_PORTD_READY_MASK                (0xf)
 #define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
+#define   PHY_CH_POWER_DOWN_OVRD_EN(phy, ch)   (1 << (2*(phy)+(ch)+27))
 #define   PHY_LDO_DELAY_0NS                    0x0
 #define   PHY_LDO_DELAY_200NS                  0x1
 #define   PHY_LDO_DELAY_600NS                  0x2
 #define   PHY_LDO_SEQ_DELAY(delay, phy)                ((delay) << (2*(phy)+23))
+#define   PHY_CH_POWER_DOWN_OVRD(mask, phy, ch)        ((mask) << (8*(phy)+4*(ch)+11))
 #define   PHY_CH_SU_PSR                                0x1
 #define   PHY_CH_DEEP_PSR                      0x7
 #define   PHY_CH_POWER_MODE(mode, phy, ch)     ((mode) << (6*(phy)+3*(ch)+2))
 #define   PHY_COM_LANE_RESET_DEASSERT(phy)     (1 << (phy))
 #define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
 #define   PHY_POWERGOOD(phy)   (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
+#define   PHY_STATUS_CMN_LDO(phy, ch)                   (1 << (6-(6*(phy)+3*(ch))))
+#define   PHY_STATUS_SPLINE_LDO(phy, ch, spline)        (1 << (8-(6*(phy)+3*(ch)+(spline))))
 
 /*
  * The i830 generation, in LVDS mode, defines P1 as the bit number set within
@@ -2464,6 +2494,11 @@ enum skl_disp_power_wells {
 
 #define MCHBAR_MIRROR_BASE_SNB 0x140000
 
+#define CTG_STOLEN_RESERVED            (MCHBAR_MIRROR_BASE + 0x34)
+#define ELK_STOLEN_RESERVED            (MCHBAR_MIRROR_BASE + 0x48)
+#define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16)
+#define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4)
+
 /* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
 #define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
 
@@ -2544,7 +2579,7 @@ enum skl_disp_power_wells {
 #define   TSFS_INTR_MASK       0x000000ff
 
 #define CRSTANDVID             0x11100
-#define PXVFREQ_BASE           0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
+#define PXVFREQ(i)             (0x11110 + (i) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
 #define   PXVFREQ_PX_MASK      0x7f000000
 #define   PXVFREQ_PX_SHIFT     24
 #define VIDFREQ_BASE           0x11110
@@ -2728,8 +2763,8 @@ enum skl_disp_power_wells {
 #define CSIEW0                 0x11250
 #define CSIEW1                 0x11254
 #define CSIEW2                 0x11258
-#define PEW                    0x1125c
-#define DEW                    0x11270
+#define PEW(i)                 (0x1125c + (i) * 4) /* 5 registers */
+#define DEW(i)                 (0x11270 + (i) * 4) /* 3 registers */
 #define MCHAFE                 0x112c0
 #define CSIEC                  0x112e0
 #define DMIEC                  0x112e4
@@ -2753,8 +2788,8 @@ enum skl_disp_power_wells {
 #define EG5                    0x11624
 #define EG6                    0x11628
 #define EG7                    0x1162c
-#define PXW                    0x11664
-#define PXWL                   0x11680
+#define PXW(i)                 (0x11664 + (i) * 4) /* 4 registers */
+#define PXWL(i)                        (0x11680 + (i) * 4) /* 8 registers */
 #define LCFUSE02               0x116c0
 #define   LCFUSE_HIV_MASK      0x000000ff
 #define CSIPLL0                        0x12c10
@@ -4047,14 +4082,10 @@ enum skl_disp_power_wells {
 # define TV_CC_DATA_1_MASK             0x0000007f
 # define TV_CC_DATA_1_SHIFT            0
 
-#define TV_H_LUMA_0            0x68100
-#define TV_H_LUMA_59           0x681ec
-#define TV_H_CHROMA_0          0x68200
-#define TV_H_CHROMA_59         0x682ec
-#define TV_V_LUMA_0            0x68300
-#define TV_V_LUMA_42           0x683a8
-#define TV_V_CHROMA_0          0x68400
-#define TV_V_CHROMA_42         0x684a8
+#define TV_H_LUMA(i)           (0x68100 + (i) * 4) /* 60 registers */
+#define TV_H_CHROMA(i)         (0x68200 + (i) * 4) /* 60 registers */
+#define TV_V_LUMA(i)           (0x68300 + (i) * 4) /* 43 registers */
+#define TV_V_CHROMA(i)         (0x68400 + (i) * 4) /* 43 registers */
 
 /* Display Port */
 #define DP_A                           0x64000 /* eDP */
@@ -4107,6 +4138,7 @@ enum skl_disp_power_wells {
 /* How many wires to use. I guess 3 was too hard */
 #define   DP_PORT_WIDTH(width)         (((width) - 1) << 19)
 #define   DP_PORT_WIDTH_MASK           (7 << 19)
+#define   DP_PORT_WIDTH_SHIFT          19
 
 /* Mystic DPCD version 1.1 special mode */
 #define   DP_ENHANCED_FRAMING          (1 << 18)
@@ -4617,6 +4649,7 @@ enum skl_disp_power_wells {
 
 #define CBR1_VLV                       (VLV_DISPLAY_BASE + 0x70400)
 #define  CBR_PND_DEADLINE_DISABLE      (1<<31)
+#define  CBR_PWM_CLOCK_MUX_SELECT      (1<<30)
 
 /* FIFO watermark sizes etc */
 #define G4X_FIFO_LINE_SIZE     64
@@ -5363,15 +5396,17 @@ enum skl_disp_power_wells {
 
 #define CPU_VGACNTRL   0x41000
 
-#define DIGITAL_PORT_HOTPLUG_CNTRL      0x44030
-#define  DIGITAL_PORTA_HOTPLUG_ENABLE           (1 << 4)
-#define  DIGITAL_PORTA_SHORT_PULSE_2MS          (0 << 2)
-#define  DIGITAL_PORTA_SHORT_PULSE_4_5MS        (1 << 2)
-#define  DIGITAL_PORTA_SHORT_PULSE_6MS          (2 << 2)
-#define  DIGITAL_PORTA_SHORT_PULSE_100MS        (3 << 2)
-#define  DIGITAL_PORTA_NO_DETECT                (0 << 0)
-#define  DIGITAL_PORTA_LONG_PULSE_DETECT_MASK   (1 << 1)
-#define  DIGITAL_PORTA_SHORT_PULSE_DETECT_MASK  (1 << 0)
+#define DIGITAL_PORT_HOTPLUG_CNTRL     0x44030
+#define  DIGITAL_PORTA_HOTPLUG_ENABLE          (1 << 4)
+#define  DIGITAL_PORTA_PULSE_DURATION_2ms      (0 << 2) /* pre-HSW */
+#define  DIGITAL_PORTA_PULSE_DURATION_4_5ms    (1 << 2) /* pre-HSW */
+#define  DIGITAL_PORTA_PULSE_DURATION_6ms      (2 << 2) /* pre-HSW */
+#define  DIGITAL_PORTA_PULSE_DURATION_100ms    (3 << 2) /* pre-HSW */
+#define  DIGITAL_PORTA_PULSE_DURATION_MASK     (3 << 2) /* pre-HSW */
+#define  DIGITAL_PORTA_HOTPLUG_STATUS_MASK     (3 << 0)
+#define  DIGITAL_PORTA_HOTPLUG_NO_DETECT       (0 << 0)
+#define  DIGITAL_PORTA_HOTPLUG_SHORT_DETECT    (1 << 0)
+#define  DIGITAL_PORTA_HOTPLUG_LONG_DETECT     (2 << 0)
 
 /* refresh rate hardware control */
 #define RR_HW_CTL       0x45300
@@ -5693,11 +5728,12 @@ enum skl_disp_power_wells {
 #define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which)))
 #define GEN8_GT_IER(which) (0x4430c + (0x10 * (which)))
 
-#define GEN8_BCS_IRQ_SHIFT 16
 #define GEN8_RCS_IRQ_SHIFT 0
-#define GEN8_VCS2_IRQ_SHIFT 16
+#define GEN8_BCS_IRQ_SHIFT 16
 #define GEN8_VCS1_IRQ_SHIFT 0
+#define GEN8_VCS2_IRQ_SHIFT 16
 #define GEN8_VECS_IRQ_SHIFT 0
+#define GEN8_WD_IRQ_SHIFT 16
 
 #define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe)))
 #define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe)))
@@ -5763,21 +5799,6 @@ enum skl_disp_power_wells {
 #define GEN8_PCU_IIR 0x444e8
 #define GEN8_PCU_IER 0x444ec
 
-/* BXT hotplug control */
-#define BXT_HOTPLUG_CTL                        0xC4030
-#define   BXT_DDIA_HPD_ENABLE          (1 << 28)
-#define   BXT_DDIA_HPD_STATUS          (3 << 24)
-#define   BXT_DDIC_HPD_ENABLE          (1 << 12)
-#define   BXT_DDIC_HPD_STATUS          (3 << 8)
-#define   BXT_DDIB_HPD_ENABLE          (1 << 4)
-#define   BXT_DDIB_HPD_STATUS          (3 << 0)
-#define   BXT_HOTPLUG_CTL_MASK         (BXT_DDIA_HPD_ENABLE | \
-                                        BXT_DDIB_HPD_ENABLE | \
-                                        BXT_DDIC_HPD_ENABLE)
-#define   BXT_HPD_STATUS_MASK          (BXT_DDIA_HPD_STATUS | \
-                                        BXT_DDIB_HPD_STATUS | \
-                                        BXT_DDIC_HPD_STATUS)
-
 #define ILK_DISPLAY_CHICKEN2   0x42004
 /* Required on all Ironlake and Sandybridge according to the B-Spec. */
 #define  ILK_ELPIN_409_SELECT  (1 << 25)
@@ -5950,6 +5971,7 @@ enum skl_disp_power_wells {
 #define SDE_AUXB_CPT           (1 << 25)
 #define SDE_AUX_MASK_CPT       (7 << 25)
 #define SDE_PORTE_HOTPLUG_SPT  (1 << 25)
+#define SDE_PORTA_HOTPLUG_SPT  (1 << 24)
 #define SDE_PORTD_HOTPLUG_CPT  (1 << 23)
 #define SDE_PORTC_HOTPLUG_CPT  (1 << 22)
 #define SDE_PORTB_HOTPLUG_CPT  (1 << 21)
@@ -5963,7 +5985,8 @@ enum skl_disp_power_wells {
 #define SDE_HOTPLUG_MASK_SPT   (SDE_PORTE_HOTPLUG_SPT |        \
                                 SDE_PORTD_HOTPLUG_CPT |        \
                                 SDE_PORTC_HOTPLUG_CPT |        \
-                                SDE_PORTB_HOTPLUG_CPT)
+                                SDE_PORTB_HOTPLUG_CPT |        \
+                                SDE_PORTA_HOTPLUG_SPT)
 #define SDE_GMBUS_CPT          (1 << 17)
 #define SDE_ERROR_CPT          (1 << 16)
 #define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
@@ -5998,46 +6021,46 @@ enum skl_disp_power_wells {
 #define  SERR_INT_TRANS_FIFO_UNDERRUN(pipe)    (1<<(pipe*3))
 
 /* digital port hotplug */
-#define PCH_PORT_HOTPLUG        0xc4030                /* SHOTPLUG_CTL */
-#define BXT_PORTA_HOTPLUG_ENABLE       (1 << 28)
-#define BXT_PORTA_HOTPLUG_STATUS_MASK  (0x3 << 24)
-#define  BXT_PORTA_HOTPLUG_NO_DETECT   (0 << 24)
-#define  BXT_PORTA_HOTPLUG_SHORT_DETECT        (1 << 24)
-#define  BXT_PORTA_HOTPLUG_LONG_DETECT (2 << 24)
-#define PORTD_HOTPLUG_ENABLE            (1 << 20)
-#define PORTD_PULSE_DURATION_2ms        (0)
-#define PORTD_PULSE_DURATION_4_5ms      (1 << 18)
-#define PORTD_PULSE_DURATION_6ms        (2 << 18)
-#define PORTD_PULSE_DURATION_100ms      (3 << 18)
-#define PORTD_PULSE_DURATION_MASK      (3 << 18)
-#define PORTD_HOTPLUG_STATUS_MASK      (0x3 << 16)
+#define PCH_PORT_HOTPLUG               0xc4030 /* SHOTPLUG_CTL */
+#define  PORTA_HOTPLUG_ENABLE          (1 << 28) /* LPT:LP+ & BXT */
+#define  PORTA_HOTPLUG_STATUS_MASK     (3 << 24) /* SPT+ & BXT */
+#define  PORTA_HOTPLUG_NO_DETECT       (0 << 24) /* SPT+ & BXT */
+#define  PORTA_HOTPLUG_SHORT_DETECT    (1 << 24) /* SPT+ & BXT */
+#define  PORTA_HOTPLUG_LONG_DETECT     (2 << 24) /* SPT+ & BXT */
+#define  PORTD_HOTPLUG_ENABLE          (1 << 20)
+#define  PORTD_PULSE_DURATION_2ms      (0 << 18) /* pre-LPT */
+#define  PORTD_PULSE_DURATION_4_5ms    (1 << 18) /* pre-LPT */
+#define  PORTD_PULSE_DURATION_6ms      (2 << 18) /* pre-LPT */
+#define  PORTD_PULSE_DURATION_100ms    (3 << 18) /* pre-LPT */
+#define  PORTD_PULSE_DURATION_MASK     (3 << 18) /* pre-LPT */
+#define  PORTD_HOTPLUG_STATUS_MASK     (3 << 16)
 #define  PORTD_HOTPLUG_NO_DETECT       (0 << 16)
 #define  PORTD_HOTPLUG_SHORT_DETECT    (1 << 16)
 #define  PORTD_HOTPLUG_LONG_DETECT     (2 << 16)
-#define PORTC_HOTPLUG_ENABLE            (1 << 12)
-#define PORTC_PULSE_DURATION_2ms        (0)
-#define PORTC_PULSE_DURATION_4_5ms      (1 << 10)
-#define PORTC_PULSE_DURATION_6ms        (2 << 10)
-#define PORTC_PULSE_DURATION_100ms      (3 << 10)
-#define PORTC_PULSE_DURATION_MASK      (3 << 10)
-#define PORTC_HOTPLUG_STATUS_MASK      (0x3 << 8)
+#define  PORTC_HOTPLUG_ENABLE          (1 << 12)
+#define  PORTC_PULSE_DURATION_2ms      (0 << 10) /* pre-LPT */
+#define  PORTC_PULSE_DURATION_4_5ms    (1 << 10) /* pre-LPT */
+#define  PORTC_PULSE_DURATION_6ms      (2 << 10) /* pre-LPT */
+#define  PORTC_PULSE_DURATION_100ms    (3 << 10) /* pre-LPT */
+#define  PORTC_PULSE_DURATION_MASK     (3 << 10) /* pre-LPT */
+#define  PORTC_HOTPLUG_STATUS_MASK     (3 << 8)
 #define  PORTC_HOTPLUG_NO_DETECT       (0 << 8)
 #define  PORTC_HOTPLUG_SHORT_DETECT    (1 << 8)
 #define  PORTC_HOTPLUG_LONG_DETECT     (2 << 8)
-#define PORTB_HOTPLUG_ENABLE            (1 << 4)
-#define PORTB_PULSE_DURATION_2ms        (0)
-#define PORTB_PULSE_DURATION_4_5ms      (1 << 2)
-#define PORTB_PULSE_DURATION_6ms        (2 << 2)
-#define PORTB_PULSE_DURATION_100ms      (3 << 2)
-#define PORTB_PULSE_DURATION_MASK      (3 << 2)
-#define PORTB_HOTPLUG_STATUS_MASK      (0x3 << 0)
+#define  PORTB_HOTPLUG_ENABLE          (1 << 4)
+#define  PORTB_PULSE_DURATION_2ms      (0 << 2) /* pre-LPT */
+#define  PORTB_PULSE_DURATION_4_5ms    (1 << 2) /* pre-LPT */
+#define  PORTB_PULSE_DURATION_6ms      (2 << 2) /* pre-LPT */
+#define  PORTB_PULSE_DURATION_100ms    (3 << 2) /* pre-LPT */
+#define  PORTB_PULSE_DURATION_MASK     (3 << 2) /* pre-LPT */
+#define  PORTB_HOTPLUG_STATUS_MASK     (3 << 0)
 #define  PORTB_HOTPLUG_NO_DETECT       (0 << 0)
 #define  PORTB_HOTPLUG_SHORT_DETECT    (1 << 0)
 #define  PORTB_HOTPLUG_LONG_DETECT     (2 << 0)
 
-#define PCH_PORT_HOTPLUG2        0xc403C               /* SHOTPLUG_CTL2 */
-#define PORTE_HOTPLUG_ENABLE            (1 << 4)
-#define PORTE_HOTPLUG_STATUS_MASK      (0x3 << 0)
+#define PCH_PORT_HOTPLUG2              0xc403C /* SHOTPLUG_CTL2 SPT+ */
+#define  PORTE_HOTPLUG_ENABLE          (1 << 4)
+#define  PORTE_HOTPLUG_STATUS_MASK     (3 << 0)
 #define  PORTE_HOTPLUG_NO_DETECT       (0 << 0)
 #define  PORTE_HOTPLUG_SHORT_DETECT    (1 << 0)
 #define  PORTE_HOTPLUG_LONG_DETECT     (2 << 0)
@@ -6304,9 +6327,11 @@ enum skl_disp_power_wells {
 #define  FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
 #define  FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
 #define  FDI_BC_BIFURCATION_SELECT     (1 << 12)
+#define  SPT_PWM_GRANULARITY           (1<<0)
 #define SOUTH_CHICKEN2         0xc2004
 #define  FDI_MPHY_IOSFSB_RESET_STATUS  (1<<13)
 #define  FDI_MPHY_IOSFSB_RESET_CTL     (1<<12)
+#define  LPT_PWM_GRANULARITY           (1<<5)
 #define  DPLS_EDP_PPS_FIX_DIS          (1<<0)
 
 #define _FDI_RXA_CHICKEN         0xc200c
@@ -6784,7 +6809,7 @@ enum skl_disp_power_wells {
                                                 GEN6_PM_RP_DOWN_THRESHOLD | \
                                                 GEN6_PM_RP_DOWN_TIMEOUT)
 
-#define GEN7_GT_SCRATCH_BASE                   0x4F100
+#define GEN7_GT_SCRATCH(i)                     (0x4F100 + (i) * 4)
 #define GEN7_GT_SCRATCH_REG_NUM                        8
 
 #define VLV_GTLC_SURVIVABILITY_REG              0x130098
@@ -6870,7 +6895,10 @@ enum skl_disp_power_wells {
 #define   GEN9_PGCTL_SSB_EU311_ACK     (1 << 14)
 
 #define GEN7_MISCCPCTL                 (0x9424)
-#define   GEN7_DOP_CLOCK_GATE_ENABLE   (1<<0)
+#define   GEN7_DOP_CLOCK_GATE_ENABLE           (1<<0)
+#define   GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE     (1<<2)
+#define   GEN8_DOP_CLOCK_GATE_GUC_ENABLE       (1<<4)
+#define   GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE     (1<<6)
 
 #define GEN8_GARBCNTL                   0xB004
 #define   GEN9_GAPS_TSV_CREDIT_DISABLE  (1<<7)
@@ -6916,6 +6944,9 @@ enum skl_disp_power_wells {
 #define HSW_ROW_CHICKEN3               0xe49c
 #define  HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE    (1 << 6)
 
+#define HALF_SLICE_CHICKEN2            0xe180
+#define   GEN8_ST_PO_DISABLE           (1<<13)
+
 #define HALF_SLICE_CHICKEN3            0xe184
 #define   HSW_SAMPLE_C_PERFORMANCE     (1<<9)
 #define   GEN8_CENTROID_PIXEL_OPT_DIS  (1<<8)
@@ -7159,12 +7190,15 @@ enum skl_disp_power_wells {
 #define  DDI_BUF_IS_IDLE                       (1<<7)
 #define  DDI_A_4_LANES                         (1<<4)
 #define  DDI_PORT_WIDTH(width)                 (((width) - 1) << 1)
+#define  DDI_PORT_WIDTH_MASK                   (7 << 1)
+#define  DDI_PORT_WIDTH_SHIFT                  1
 #define  DDI_INIT_DISPLAY_DETECTED             (1<<0)
 
 /* DDI Buffer Translations */
 #define DDI_BUF_TRANS_A                                0x64E00
 #define DDI_BUF_TRANS_B                                0x64E60
-#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B)
+#define DDI_BUF_TRANS_LO(port, i) (_PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + (i) * 8)
+#define DDI_BUF_TRANS_HI(port, i) (_PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + (i) * 8 + 4)
 
 /* Sideband Interface (SBI) is programmed indirectly, via
  * SBI_ADDR, which contains the register offset; and SBI_DATA,
@@ -7475,6 +7509,44 @@ enum skl_disp_power_wells {
 
 #define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c)   /* ports A and C only */
 
+/* BXT MIPI mode configure */
+#define  _BXT_MIPIA_TRANS_HACTIVE                      0x6B0F8
+#define  _BXT_MIPIC_TRANS_HACTIVE                      0x6B8F8
+#define  BXT_MIPI_TRANS_HACTIVE(tc)    _MIPI_PORT(tc, \
+               _BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE)
+
+#define  _BXT_MIPIA_TRANS_VACTIVE                      0x6B0FC
+#define  _BXT_MIPIC_TRANS_VACTIVE                      0x6B8FC
+#define  BXT_MIPI_TRANS_VACTIVE(tc)    _MIPI_PORT(tc, \
+               _BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE)
+
+#define  _BXT_MIPIA_TRANS_VTOTAL                       0x6B100
+#define  _BXT_MIPIC_TRANS_VTOTAL                       0x6B900
+#define  BXT_MIPI_TRANS_VTOTAL(tc)     _MIPI_PORT(tc, \
+               _BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL)
+
+#define BXT_DSI_PLL_CTL                        0x161000
+#define  BXT_DSI_PLL_PVD_RATIO_SHIFT   16
+#define  BXT_DSI_PLL_PVD_RATIO_MASK    (3 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
+#define  BXT_DSI_PLL_PVD_RATIO_1       (1 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
+#define  BXT_DSIC_16X_BY2              (1 << 10)
+#define  BXT_DSIC_16X_BY3              (2 << 10)
+#define  BXT_DSIC_16X_BY4              (3 << 10)
+#define  BXT_DSIA_16X_BY2              (1 << 8)
+#define  BXT_DSIA_16X_BY3              (2 << 8)
+#define  BXT_DSIA_16X_BY4              (3 << 8)
+#define  BXT_DSI_FREQ_SEL_SHIFT                8
+#define  BXT_DSI_FREQ_SEL_MASK         (0xF << BXT_DSI_FREQ_SEL_SHIFT)
+
+#define BXT_DSI_PLL_RATIO_MAX          0x7D
+#define BXT_DSI_PLL_RATIO_MIN          0x22
+#define BXT_DSI_PLL_RATIO_MASK         0xFF
+#define BXT_REF_CLOCK_KHZ              19500
+
+#define BXT_DSI_PLL_ENABLE             0x46080
+#define  BXT_DSI_PLL_DO_ENABLE         (1 << 31)
+#define  BXT_DSI_PLL_LOCKED            (1 << 30)
+
 #define _MIPIA_PORT_CTRL                       (VLV_DISPLAY_BASE + 0x61190)
 #define _MIPIC_PORT_CTRL                       (VLV_DISPLAY_BASE + 0x61700)
 #define MIPI_PORT_CTRL(port)   _MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
@@ -7888,6 +7960,11 @@ enum skl_disp_power_wells {
 #define  READ_REQUEST_PRIORITY_HIGH                    (3 << 3)
 #define  RGB_FLIP_TO_BGR                               (1 << 2)
 
+#define  BXT_PIPE_SELECT_MASK                          (7 << 7)
+#define  BXT_PIPE_SELECT_C                             (2 << 7)
+#define  BXT_PIPE_SELECT_B                             (1 << 7)
+#define  BXT_PIPE_SELECT_A                             (0 << 7)
+
 #define _MIPIA_DATA_ADDRESS            (dev_priv->mipi_mmio_base + 0xb108)
 #define _MIPIC_DATA_ADDRESS            (dev_priv->mipi_mmio_base + 0xb908)
 #define MIPI_DATA_ADDRESS(port)                _MIPI_PORT(port, _MIPIA_DATA_ADDRESS, \
index 2f34c47bd4bfb7a566453475cd5be6f16b4b7872..d0993bc814ea5beeffa9126780b84ec16b4ba0ba 100644 (file)
@@ -17,8 +17,8 @@
 /* pipe updates */
 
 TRACE_EVENT(i915_pipe_update_start,
-           TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max),
-           TP_ARGS(crtc, min, max),
+           TP_PROTO(struct intel_crtc *crtc),
+           TP_ARGS(crtc),
 
            TP_STRUCT__entry(
                             __field(enum pipe, pipe)
@@ -33,8 +33,8 @@ TRACE_EVENT(i915_pipe_update_start,
                           __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
                                                                                       crtc->pipe);
                           __entry->scanline = intel_get_crtc_scanline(crtc);
-                          __entry->min = min;
-                          __entry->max = max;
+                          __entry->min = crtc->debug.min_vbl;
+                          __entry->max = crtc->debug.max_vbl;
                           ),
 
            TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
@@ -43,8 +43,8 @@ TRACE_EVENT(i915_pipe_update_start,
 );
 
 TRACE_EVENT(i915_pipe_update_vblank_evaded,
-           TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max, u32 frame),
-           TP_ARGS(crtc, min, max, frame),
+           TP_PROTO(struct intel_crtc *crtc),
+           TP_ARGS(crtc),
 
            TP_STRUCT__entry(
                             __field(enum pipe, pipe)
@@ -56,10 +56,10 @@ TRACE_EVENT(i915_pipe_update_vblank_evaded,
 
            TP_fast_assign(
                           __entry->pipe = crtc->pipe;
-                          __entry->frame = frame;
-                          __entry->scanline = intel_get_crtc_scanline(crtc);
-                          __entry->min = min;
-                          __entry->max = max;
+                          __entry->frame = crtc->debug.start_vbl_count;
+                          __entry->scanline = crtc->debug.scanline_start;
+                          __entry->min = crtc->debug.min_vbl;
+                          __entry->max = crtc->debug.max_vbl;
                           ),
 
            TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
@@ -68,8 +68,8 @@ TRACE_EVENT(i915_pipe_update_vblank_evaded,
 );
 
 TRACE_EVENT(i915_pipe_update_end,
-           TP_PROTO(struct intel_crtc *crtc, u32 frame),
-           TP_ARGS(crtc, frame),
+           TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end),
+           TP_ARGS(crtc, frame, scanline_end),
 
            TP_STRUCT__entry(
                             __field(enum pipe, pipe)
@@ -80,7 +80,7 @@ TRACE_EVENT(i915_pipe_update_end,
            TP_fast_assign(
                           __entry->pipe = crtc->pipe;
                           __entry->frame = frame;
-                          __entry->scanline = intel_get_crtc_scanline(crtc);
+                          __entry->scanline = scanline_end;
                           ),
 
            TP_printk("pipe %c, frame=%u, scanline=%u",
@@ -186,33 +186,49 @@ DEFINE_EVENT(i915_va, i915_va_alloc,
             TP_ARGS(vm, start, length, name)
 );
 
-DECLARE_EVENT_CLASS(i915_page_table_entry,
-       TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
-       TP_ARGS(vm, pde, start, pde_shift),
+DECLARE_EVENT_CLASS(i915_px_entry,
+       TP_PROTO(struct i915_address_space *vm, u32 px, u64 start, u64 px_shift),
+       TP_ARGS(vm, px, start, px_shift),
 
        TP_STRUCT__entry(
                __field(struct i915_address_space *, vm)
-               __field(u32, pde)
+               __field(u32, px)
                __field(u64, start)
                __field(u64, end)
        ),
 
        TP_fast_assign(
                __entry->vm = vm;
-               __entry->pde = pde;
+               __entry->px = px;
                __entry->start = start;
-               __entry->end = ((start + (1ULL << pde_shift)) & ~((1ULL << pde_shift)-1)) - 1;
+               __entry->end = ((start + (1ULL << px_shift)) & ~((1ULL << px_shift)-1)) - 1;
        ),
 
        TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
-                 __entry->vm, __entry->pde, __entry->start, __entry->end)
+                 __entry->vm, __entry->px, __entry->start, __entry->end)
 );
 
-DEFINE_EVENT(i915_page_table_entry, i915_page_table_entry_alloc,
+DEFINE_EVENT(i915_px_entry, i915_page_table_entry_alloc,
             TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
             TP_ARGS(vm, pde, start, pde_shift)
 );
 
+DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_entry_alloc,
+                  TP_PROTO(struct i915_address_space *vm, u32 pdpe, u64 start, u64 pdpe_shift),
+                  TP_ARGS(vm, pdpe, start, pdpe_shift),
+
+                  TP_printk("vm=%p, pdpe=%d (0x%llx-0x%llx)",
+                            __entry->vm, __entry->px, __entry->start, __entry->end)
+);
+
+DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_pointer_entry_alloc,
+                  TP_PROTO(struct i915_address_space *vm, u32 pml4e, u64 start, u64 pml4e_shift),
+                  TP_ARGS(vm, pml4e, start, pml4e_shift),
+
+                  TP_printk("vm=%p, pml4e=%d (0x%llx-0x%llx)",
+                            __entry->vm, __entry->px, __entry->start, __entry->end)
+);
+
 /* Avoid extra math because we only support two sizes. The format is defined by
  * bitmap_scnprintf. Each 32 bits is 8 HEX digits followed by comma */
 #define TRACE_PT_SIZE(bits) \
index 97a88b5f6a260013c6d0f315b4bc297fd4d2ab26..21c97f44d637b24b1d20dc1383a67bd5f65eaf8c 100644 (file)
 #define INTEL_VGT_IF_VERSION \
        INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
 
+/*
+ * notifications from guest to vgpu device model
+ */
+enum vgt_g2v_type {
+       VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2,
+       VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY,
+       VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE,
+       VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY,
+       VGT_G2V_EXECLIST_CONTEXT_CREATE,
+       VGT_G2V_EXECLIST_CONTEXT_DESTROY,
+       VGT_G2V_MAX,
+};
+
 struct vgt_if {
        uint64_t magic;         /* VGT_MAGIC */
        uint16_t version_major;
@@ -70,11 +83,28 @@ struct vgt_if {
        uint32_t rsv3[0x200 - 24];      /* pad to half page */
        /*
         * The bottom half page is for response from Gfx driver to hypervisor.
-        * Set to reserved fields temporarily by now.
         */
        uint32_t rsv4;
        uint32_t display_ready; /* ready for display owner switch */
-       uint32_t rsv5[0x200 - 2];       /* pad to one page */
+
+       uint32_t rsv5[4];
+
+       uint32_t g2v_notify;
+       uint32_t rsv6[7];
+
+       uint32_t pdp0_lo;
+       uint32_t pdp0_hi;
+       uint32_t pdp1_lo;
+       uint32_t pdp1_hi;
+       uint32_t pdp2_lo;
+       uint32_t pdp2_hi;
+       uint32_t pdp3_lo;
+       uint32_t pdp3_hi;
+
+       uint32_t execlist_context_descriptor_lo;
+       uint32_t execlist_context_descriptor_hi;
+
+       uint32_t  rsv7[0x200 - 24];    /* pad to one page */
 } __packed;
 
 #define vgtif_reg(x) \
index d96eee1ae9c560bb1559c5e911e2a84ced425051..8b13b9d0373a6ca3a33f5ff27944af34baf63487 100644 (file)
@@ -146,7 +146,7 @@ static bool intel_dsm_detect(void)
 
        if (vga_count == 2 && has_dsm) {
                acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
-               DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n",
+               DRM_DEBUG_DRIVER("vga_switcheroo: detected DSM switching method %s handle\n",
                                 acpi_method_name);
                return true;
        }
index e2531cf59266e77208c766b36c5291d4a939a825..f1975f267710ba22f0569c3abfa026dc10b5985b 100644 (file)
@@ -85,21 +85,15 @@ intel_connector_atomic_get_property(struct drm_connector *connector,
 struct drm_crtc_state *
 intel_crtc_duplicate_state(struct drm_crtc *crtc)
 {
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *crtc_state;
 
-       if (WARN_ON(!intel_crtc->config))
-               crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
-       else
-               crtc_state = kmemdup(intel_crtc->config,
-                                    sizeof(*intel_crtc->config), GFP_KERNEL);
-
+       crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL);
        if (!crtc_state)
                return NULL;
 
        __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
 
-       crtc_state->base.crtc = crtc;
+       crtc_state->update_pipe = false;
 
        return &crtc_state->base;
 }
@@ -149,9 +143,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
        int i, j;
 
        num_scalers_need = hweight32(scaler_state->scaler_users);
-       DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n",
-               crtc_state, num_scalers_need, intel_crtc->num_scalers,
-               scaler_state->scaler_users);
 
        /*
         * High level flow:
index f1ab8e4b9c11c6b75534c52ad5e3bc1b05000dce..a119806965951eb15b9cbc66daa898e4ad52b3b4 100644 (file)
@@ -76,11 +76,7 @@ intel_plane_duplicate_state(struct drm_plane *plane)
        struct drm_plane_state *state;
        struct intel_plane_state *intel_state;
 
-       if (WARN_ON(!plane->state))
-               intel_state = intel_create_plane_state(plane);
-       else
-               intel_state = kmemdup(plane->state, sizeof(*intel_state),
-                                     GFP_KERNEL);
+       intel_state = kmemdup(plane->state, sizeof(*intel_state), GFP_KERNEL);
 
        if (!intel_state)
                return NULL;
index 2a5c76faf9f8dbc19ec6d17c490ff833d61d6afd..e35997ebb3311f6c6a405dc2b10dc4e421eb4e47 100644 (file)
@@ -404,7 +404,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        enum port port = intel_dig_port->port;
 
-       connector = drm_select_eld(encoder, mode);
+       connector = drm_select_eld(encoder);
        if (!connector)
                return;
 
index c19e669ffe504e32218afd4aee47670afc3982c9..68421c273c8c146440199c12d7cc0834f989aaee 100644 (file)
@@ -1350,21 +1350,3 @@ intel_parse_bios(struct drm_device *dev)
 
        return 0;
 }
-
-/* Ensure that vital registers have been initialised, even if the BIOS
- * is absent or just failing to do its job.
- */
-void intel_setup_bios(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-        /* Set the Panel Power On/Off timings if uninitialized. */
-       if (!HAS_PCH_SPLIT(dev) &&
-           I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
-               /* Set T2 to 40ms and T5 to 200ms */
-               I915_WRITE(PP_ON_DELAYS, 0x019007d0);
-
-               /* Set T3 to 35ms and Tx to 200ms */
-               I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
-       }
-}
index 46cd5c7ebacd3e8873b624e0cfe0e1d03ba72aa8..7ec8c9aefb849be22fac3b818097d49bda8d1377 100644 (file)
@@ -588,7 +588,6 @@ struct bdb_psr {
        struct psr_table psr_table[16];
 } __packed;
 
-void intel_setup_bios(struct drm_device *dev);
 int intel_parse_bios(struct drm_device *dev);
 
 /*
@@ -742,7 +741,6 @@ int intel_parse_bios(struct drm_device *dev);
  */
 #define DEVICE_TYPE_eDP_BITS \
        (DEVICE_TYPE_INTERNAL_CONNECTOR | \
-        DEVICE_TYPE_NOT_HDMI_OUTPUT | \
         DEVICE_TYPE_MIPI_OUTPUT | \
         DEVICE_TYPE_COMPOSITE_OUTPUT | \
         DEVICE_TYPE_DUAL_CHANNEL | \
@@ -750,7 +748,6 @@ int intel_parse_bios(struct drm_device *dev);
         DEVICE_TYPE_TMDS_DVI_SIGNALING | \
         DEVICE_TYPE_VIDEO_SIGNALING | \
         DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
-        DEVICE_TYPE_DIGITAL_OUTPUT | \
         DEVICE_TYPE_ANALOG_OUTPUT)
 
 /* define the DVO port for HDMI output type */
index af5e43bef4a41003437a7f1f2979992d065193ac..6ce38e3edf21a9b215e201b637ffb3a3bccdd417 100644 (file)
@@ -376,7 +376,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 hotplug_en, orig, stat;
+       u32 stat;
        bool ret = false;
        int i, tries = 0;
 
@@ -395,12 +395,12 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
                tries = 2;
        else
                tries = 1;
-       hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN);
-       hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
 
        for (i = 0; i < tries ; i++) {
                /* turn on the FORCE_DETECT */
-               I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+               i915_hotplug_interrupt_update(dev_priv,
+                                             CRT_HOTPLUG_FORCE_DETECT,
+                                             CRT_HOTPLUG_FORCE_DETECT);
                /* wait for FORCE_DETECT to go off */
                if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
                              CRT_HOTPLUG_FORCE_DETECT) == 0,
@@ -415,8 +415,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
        /* clear the interrupt we just generated, if any */
        I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
 
-       /* and put the bits back */
-       I915_WRITE(PORT_HOTPLUG_EN, orig);
+       i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
 
        return ret;
 }
index d0f1b8d833cd2d890e0328df50cf5839cd3fba2f..3427dd41d682139cc3fcc4c19283f3a4c7f39b6a 100644 (file)
  */
 
 #define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
 
 MODULE_FIRMWARE(I915_CSR_SKL);
+MODULE_FIRMWARE(I915_CSR_BXT);
 
 /*
 * SKL CSR registers for DC5 and DC6
 */
-#define CSR_PROGRAM_BASE               0x80000
+#define CSR_PROGRAM(i)                 (0x80000 + (i) * 4)
 #define CSR_SSP_BASE_ADDR_GEN9         0x00002FC0
 #define CSR_HTP_ADDR_SKL               0x00500034
 #define CSR_SSP_BASE                   0x8F074
@@ -181,11 +183,19 @@ static const struct stepping_info skl_stepping_info[] = {
                {'G', '0'}, {'H', '0'}, {'I', '0'}
 };
 
+static struct stepping_info bxt_stepping_info[] = {
+       {'A', '0'}, {'A', '1'}, {'A', '2'},
+       {'B', '0'}, {'B', '1'}, {'B', '2'}
+};
+
 static char intel_get_stepping(struct drm_device *dev)
 {
        if (IS_SKYLAKE(dev) && (dev->pdev->revision <
                        ARRAY_SIZE(skl_stepping_info)))
                return skl_stepping_info[dev->pdev->revision].stepping;
+       else if (IS_BROXTON(dev) && (dev->pdev->revision <
+                               ARRAY_SIZE(bxt_stepping_info)))
+               return bxt_stepping_info[dev->pdev->revision].stepping;
        else
                return -ENODATA;
 }
@@ -195,6 +205,9 @@ static char intel_get_substepping(struct drm_device *dev)
        if (IS_SKYLAKE(dev) && (dev->pdev->revision <
                        ARRAY_SIZE(skl_stepping_info)))
                return skl_stepping_info[dev->pdev->revision].substepping;
+       else if (IS_BROXTON(dev) && (dev->pdev->revision <
+                       ARRAY_SIZE(bxt_stepping_info)))
+               return bxt_stepping_info[dev->pdev->revision].substepping;
        else
                return -ENODATA;
 }
@@ -255,8 +268,7 @@ void intel_csr_load_program(struct drm_device *dev)
        mutex_lock(&dev_priv->csr_lock);
        fw_size = dev_priv->csr.dmc_fw_size;
        for (i = 0; i < fw_size; i++)
-               I915_WRITE(CSR_PROGRAM_BASE + i * 4,
-                       payload[i]);
+               I915_WRITE(CSR_PROGRAM(i), payload[i]);
 
        for (i = 0; i < dev_priv->csr.mmio_count; i++) {
                I915_WRITE(dev_priv->csr.mmioaddr[i],
@@ -409,6 +421,8 @@ void intel_csr_ucode_init(struct drm_device *dev)
 
        if (IS_SKYLAKE(dev))
                csr->fw_path = I915_CSR_SKL;
+       else if (IS_BROXTON(dev_priv))
+               csr->fw_path = I915_CSR_BXT;
        else {
                DRM_ERROR("Unexpected: no known CSR firmware for platform\n");
                intel_csr_load_status_set(dev_priv, FW_FAILED);
@@ -454,10 +468,10 @@ void intel_csr_ucode_fini(struct drm_device *dev)
 
 void assert_csr_loaded(struct drm_i915_private *dev_priv)
 {
-       WARN(intel_csr_load_status_get(dev_priv) != FW_LOADED,
-            "CSR is not loaded.\n");
-       WARN(!I915_READ(CSR_PROGRAM_BASE),
-                               "CSR program storage start is NULL\n");
-       WARN(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
-       WARN(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
+       WARN_ONCE(intel_csr_load_status_get(dev_priv) != FW_LOADED,
+                 "CSR is not loaded.\n");
+       WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
+                 "CSR program storage start is NULL\n");
+       WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
+       WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
 }
index 61575f67a62630a0575e575d133d2cf93ce2a6c6..9e640eafc50d2e52f7fe2c2ded012e7dab01d4bc 100644 (file)
@@ -414,7 +414,6 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
                                      bool supports_hdmi)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 reg;
        u32 iboost_bit = 0;
        int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
            size;
@@ -505,11 +504,11 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
                BUG();
        }
 
-       for (i = 0, reg = DDI_BUF_TRANS(port); i < size; i++) {
-               I915_WRITE(reg, ddi_translations[i].trans1 | iboost_bit);
-               reg += 4;
-               I915_WRITE(reg, ddi_translations[i].trans2);
-               reg += 4;
+       for (i = 0; i < size; i++) {
+               I915_WRITE(DDI_BUF_TRANS_LO(port, i),
+                          ddi_translations[i].trans1 | iboost_bit);
+               I915_WRITE(DDI_BUF_TRANS_HI(port, i),
+                          ddi_translations[i].trans2);
        }
 
        if (!supports_hdmi)
@@ -521,10 +520,10 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
                hdmi_level = hdmi_default_entry;
 
        /* Entry 9 is for HDMI: */
-       I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
-       reg += 4;
-       I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans2);
-       reg += 4;
+       I915_WRITE(DDI_BUF_TRANS_LO(port, i),
+                  ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
+       I915_WRITE(DDI_BUF_TRANS_HI(port, i),
+                  ddi_translations_hdmi[hdmi_level].trans2);
 }
 
 /* Program DDI buffers translations for DP. By default, program ports A-D in DP
@@ -707,7 +706,6 @@ void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
        intel_dp->DP = intel_dig_port->saved_port_bits |
                DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
        intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
-
 }
 
 static struct intel_encoder *
@@ -1242,9 +1240,10 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
 static bool
 hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
                   struct intel_crtc_state *crtc_state,
-                  struct intel_encoder *intel_encoder,
-                  int clock)
+                  struct intel_encoder *intel_encoder)
 {
+       int clock = crtc_state->port_clock;
+
        if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
                struct intel_shared_dpll *pll;
                uint32_t val;
@@ -1523,11 +1522,11 @@ skip_remaining_dividers:
 static bool
 skl_ddi_pll_select(struct intel_crtc *intel_crtc,
                   struct intel_crtc_state *crtc_state,
-                  struct intel_encoder *intel_encoder,
-                  int clock)
+                  struct intel_encoder *intel_encoder)
 {
        struct intel_shared_dpll *pll;
        uint32_t ctrl1, cfgcr1, cfgcr2;
+       int clock = crtc_state->port_clock;
 
        /*
         * See comment in intel_dpll_hw_state to understand why we always use 0
@@ -1615,14 +1614,14 @@ static const struct bxt_clk_div bxt_dp_clk_val[] = {
 static bool
 bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
                   struct intel_crtc_state *crtc_state,
-                  struct intel_encoder *intel_encoder,
-                  int clock)
+                  struct intel_encoder *intel_encoder)
 {
        struct intel_shared_dpll *pll;
        struct bxt_clk_div clk_div = {0};
        int vco = 0;
        uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
        uint32_t lanestagger;
+       int clock = crtc_state->port_clock;
 
        if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
                intel_clock_t best_clock;
@@ -1750,17 +1749,16 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
        struct drm_device *dev = intel_crtc->base.dev;
        struct intel_encoder *intel_encoder =
                intel_ddi_get_crtc_new_encoder(crtc_state);
-       int clock = crtc_state->port_clock;
 
        if (IS_SKYLAKE(dev))
                return skl_ddi_pll_select(intel_crtc, crtc_state,
-                                         intel_encoder, clock);
+                                         intel_encoder);
        else if (IS_BROXTON(dev))
                return bxt_ddi_pll_select(intel_crtc, crtc_state,
-                                         intel_encoder, clock);
+                                         intel_encoder);
        else
                return hsw_ddi_pll_select(intel_crtc, crtc_state,
-                                         intel_encoder, clock);
+                                         intel_encoder);
 }
 
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
@@ -1893,7 +1891,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
                } else
                        temp |= TRANS_DDI_MODE_SELECT_DP_SST;
 
-               temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
+               temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
        } else if (type == INTEL_OUTPUT_DP_MST) {
                struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp;
 
@@ -1902,7 +1900,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
                } else
                        temp |= TRANS_DDI_MODE_SELECT_DP_SST;
 
-               temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
+               temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
        } else {
                WARN(1, "Invalid encoder type %d for pipe %c\n",
                     intel_encoder->type, pipe_name(pipe));
@@ -2289,6 +2287,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
        if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+               intel_dp_set_link_params(intel_dp, crtc->config);
+
                intel_ddi_init_dp_buf_reg(intel_encoder);
 
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
@@ -2881,7 +2881,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
         * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
         */
        hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
-       if (I915_READ(BXT_PORT_PCS_DW12_LN23(port) != hw_state->pcsdw12))
+       if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
                DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
                                 hw_state->pcsdw12,
                                 I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
@@ -3069,6 +3069,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
        case TRANS_DDI_MODE_SELECT_DP_SST:
        case TRANS_DDI_MODE_SELECT_DP_MST:
                pipe_config->has_dp_encoder = true;
+               pipe_config->lane_count =
+                       ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
                intel_dp_get_m_n(intel_crtc, pipe_config);
                break;
        default:
@@ -3215,7 +3217,15 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
                        goto err;
 
                intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
-               dev_priv->hotplug.irq_port[port] = intel_dig_port;
+               /*
+                * On BXT A0/A1, sw needs to activate DDIA HPD logic and
+                * interrupts to check the external panel connection.
+                */
+               if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0)
+                                        && port == PORT_B)
+                       dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
+               else
+                       dev_priv->hotplug.irq_port[port] = intel_dig_port;
        }
 
        /* In theory we don't need the encoder->type check, but leave it just in
index cf418be7d30a52d0e25ac42201b61b0e42f16dbe..184725770ae7ec19610149179e05b29e12d70581 100644 (file)
@@ -72,6 +72,10 @@ static const uint32_t skl_primary_formats[] = {
        DRM_FORMAT_ABGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
 };
 
 /* Cursor formats */
@@ -108,6 +112,9 @@ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_cr
        struct intel_crtc_state *crtc_state);
 static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
                           int num_connectors);
+static void skylake_pfit_enable(struct intel_crtc *crtc);
+static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
+static void ironlake_pfit_enable(struct intel_crtc *crtc);
 static void intel_modeset_setup_hw_state(struct drm_device *dev);
 
 typedef struct {
@@ -135,6 +142,39 @@ intel_pch_rawclk(struct drm_device *dev)
        return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
 }
 
+/* hrawclock is 1/4 the FSB frequency */
+int intel_hrawclk(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t clkcfg;
+
+       /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
+       if (IS_VALLEYVIEW(dev))
+               return 200;
+
+       clkcfg = I915_READ(CLKCFG);
+       switch (clkcfg & CLKCFG_FSB_MASK) {
+       case CLKCFG_FSB_400:
+               return 100;
+       case CLKCFG_FSB_533:
+               return 133;
+       case CLKCFG_FSB_667:
+               return 166;
+       case CLKCFG_FSB_800:
+               return 200;
+       case CLKCFG_FSB_1067:
+               return 266;
+       case CLKCFG_FSB_1333:
+               return 333;
+       /* these two are just a guess; one of them might be right */
+       case CLKCFG_FSB_1600:
+       case CLKCFG_FSB_1600_ALT:
+               return 400;
+       default:
+               return 133;
+       }
+}
+
 static inline u32 /* units of 100MHz */
 intel_fdi_link_freq(struct drm_device *dev)
 {
@@ -1061,54 +1101,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
        }
 }
 
-/*
- * ibx_digital_port_connected - is the specified port connected?
- * @dev_priv: i915 private structure
- * @port: the port to test
- *
- * Returns true if @port is connected, false otherwise.
- */
-bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
-                               struct intel_digital_port *port)
-{
-       u32 bit;
-
-       if (HAS_PCH_IBX(dev_priv->dev)) {
-               switch (port->port) {
-               case PORT_B:
-                       bit = SDE_PORTB_HOTPLUG;
-                       break;
-               case PORT_C:
-                       bit = SDE_PORTC_HOTPLUG;
-                       break;
-               case PORT_D:
-                       bit = SDE_PORTD_HOTPLUG;
-                       break;
-               default:
-                       return true;
-               }
-       } else {
-               switch (port->port) {
-               case PORT_B:
-                       bit = SDE_PORTB_HOTPLUG_CPT;
-                       break;
-               case PORT_C:
-                       bit = SDE_PORTC_HOTPLUG_CPT;
-                       break;
-               case PORT_D:
-                       bit = SDE_PORTD_HOTPLUG_CPT;
-                       break;
-               case PORT_E:
-                       bit = SDE_PORTE_HOTPLUG_SPT;
-                       break;
-               default:
-                       return true;
-               }
-       }
-
-       return I915_READ(SDEISR) & bit;
-}
-
 static const char *state_string(bool enabled)
 {
        return enabled ? "on" : "off";
@@ -1585,26 +1577,6 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
        assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
 }
 
-static void intel_init_dpio(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (!IS_VALLEYVIEW(dev))
-               return;
-
-       /*
-        * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
-        * CHV x1 PHY (DP/HDMI D)
-        * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
-        */
-       if (IS_CHERRYVIEW(dev)) {
-               DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
-               DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
-       } else {
-               DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
-       }
-}
-
 static void vlv_enable_pll(struct intel_crtc *crtc,
                           const struct intel_crtc_state *pipe_config)
 {
@@ -1831,17 +1803,6 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
        val &= ~DPIO_DCLKP_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
 
-       /* disable left/right clock distribution */
-       if (pipe != PIPE_B) {
-               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
-               val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
-               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
-       } else {
-               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
-               val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
-               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
-       }
-
        mutex_unlock(&dev_priv->sb_lock);
 }
 
@@ -2229,7 +2190,7 @@ static bool need_vtd_wa(struct drm_device *dev)
 
 unsigned int
 intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
-                 uint64_t fb_format_modifier)
+                 uint64_t fb_format_modifier, unsigned int plane)
 {
        unsigned int tile_height;
        uint32_t pixel_bytes;
@@ -2245,7 +2206,7 @@ intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
                tile_height = 32;
                break;
        case I915_FORMAT_MOD_Yf_TILED:
-               pixel_bytes = drm_format_plane_cpp(pixel_format, 0);
+               pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
                switch (pixel_bytes) {
                default:
                case 1:
@@ -2279,7 +2240,7 @@ intel_fb_align_height(struct drm_device *dev, unsigned int height,
                      uint32_t pixel_format, uint64_t fb_format_modifier)
 {
        return ALIGN(height, intel_tile_height(dev, pixel_format,
-                                              fb_format_modifier));
+                                              fb_format_modifier, 0));
 }
 
 static int
@@ -2302,15 +2263,27 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
        info->height = fb->height;
        info->pixel_format = fb->pixel_format;
        info->pitch = fb->pitches[0];
+       info->uv_offset = fb->offsets[1];
        info->fb_modifier = fb->modifier[0];
 
        tile_height = intel_tile_height(fb->dev, fb->pixel_format,
-                                       fb->modifier[0]);
+                                       fb->modifier[0], 0);
        tile_pitch = PAGE_SIZE / tile_height;
        info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
        info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
        info->size = info->width_pages * info->height_pages * PAGE_SIZE;
 
+       if (info->pixel_format == DRM_FORMAT_NV12) {
+               tile_height = intel_tile_height(fb->dev, fb->pixel_format,
+                                               fb->modifier[0], 1);
+               tile_pitch = PAGE_SIZE / tile_height;
+               info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
+               info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
+                                                    tile_height);
+               info->size_uv = info->width_pages_uv * info->height_pages_uv *
+                               PAGE_SIZE;
+       }
+
        return 0;
 }
 
@@ -2769,6 +2742,9 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
                        (intel_crtc->config->pipe_src_w - 1) * pixel_size;
        }
 
+       intel_crtc->adjusted_x = x;
+       intel_crtc->adjusted_y = y;
+
        I915_WRITE(reg, dspcntr);
 
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
@@ -2869,6 +2845,9 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
                }
        }
 
+       intel_crtc->adjusted_x = x;
+       intel_crtc->adjusted_y = y;
+
        I915_WRITE(reg, dspcntr);
 
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
@@ -2918,14 +2897,29 @@ u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
 }
 
 unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
-                                    struct drm_i915_gem_object *obj)
+                                    struct drm_i915_gem_object *obj,
+                                    unsigned int plane)
 {
        const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
+       struct i915_vma *vma;
+       unsigned char *offset;
 
        if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
                view = &i915_ggtt_view_rotated;
 
-       return i915_gem_obj_ggtt_offset_view(obj, view);
+       vma = i915_gem_obj_to_ggtt_view(obj, view);
+       if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
+               view->type))
+               return -1;
+
+       offset = (unsigned char *)vma->node.start;
+
+       if (plane == 1) {
+               offset += vma->ggtt_view.rotation_info.uv_start_page *
+                         PAGE_SIZE;
+       }
+
+       return (unsigned long)offset;
 }
 
 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
@@ -2936,8 +2930,6 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
        I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
        I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
        I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
-       DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
-               intel_crtc->base.base.id, intel_crtc->pipe, id);
 }
 
 /*
@@ -3083,7 +3075,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
        obj = intel_fb_obj(fb);
        stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
                                               fb->pixel_format);
-       surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj);
+       surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
 
        /*
         * FIXME: intel_plane_state->src, dst aren't set when transitional
@@ -3110,7 +3102,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
        if (intel_rotation_90_or_270(rotation)) {
                /* stride = Surface height in tiles */
                tile_height = intel_tile_height(dev, fb->pixel_format,
-                                               fb->modifier[0]);
+                                               fb->modifier[0], 0);
                stride = DIV_ROUND_UP(fb->height, tile_height);
                x_offset = stride * tile_height - y - src_h;
                y_offset = x;
@@ -3123,6 +3115,9 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
        }
        plane_offset = y_offset << 16 | x_offset;
 
+       intel_crtc->adjusted_x = x_offset;
+       intel_crtc->adjusted_y = y_offset;
+
        I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
        I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
        I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
@@ -3179,24 +3174,20 @@ static void intel_complete_page_flips(struct drm_device *dev)
 
 static void intel_update_primary_planes(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
 
        for_each_crtc(dev, crtc) {
-               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+               struct intel_plane *plane = to_intel_plane(crtc->primary);
+               struct intel_plane_state *plane_state;
 
-               drm_modeset_lock(&crtc->mutex, NULL);
-               /*
-                * FIXME: Once we have proper support for primary planes (and
-                * disabling them without disabling the entire crtc) allow again
-                * a NULL crtc->primary->fb.
-                */
-               if (intel_crtc->active && crtc->primary->fb)
-                       dev_priv->display.update_primary_plane(crtc,
-                                                              crtc->primary->fb,
-                                                              crtc->x,
-                                                              crtc->y);
-               drm_modeset_unlock(&crtc->mutex);
+               drm_modeset_lock_crtc(crtc, &plane->base);
+
+               plane_state = to_intel_plane_state(plane->base.state);
+
+               if (plane_state->base.fb)
+                       plane->commit_plane(&plane->base, plane_state);
+
+               drm_modeset_unlock_crtc(crtc);
        }
 }
 
@@ -3240,6 +3231,9 @@ void intel_finish_reset(struct drm_device *dev)
                 * so update the base address of all primary
                 * planes to the the last fb to make sure we're
                 * showing the correct fb after a reset.
+                *
+                * FIXME: Atomic will make this obsolete since we won't schedule
+                * CS-based flips (which might get lost in gpu resets) any more.
                 */
                intel_update_primary_planes(dev);
                return;
@@ -3310,14 +3304,23 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
        return pending;
 }
 
-static void intel_update_pipe_size(struct intel_crtc *crtc)
+static void intel_update_pipe_config(struct intel_crtc *crtc,
+                                    struct intel_crtc_state *old_crtc_state)
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       const struct drm_display_mode *adjusted_mode;
+       struct intel_crtc_state *pipe_config =
+               to_intel_crtc_state(crtc->base.state);
 
-       if (!i915.fastboot)
-               return;
+       /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
+       crtc->base.mode = crtc->base.state->mode;
+
+       DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
+                     old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
+                     pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+
+       if (HAS_DDI(dev))
+               intel_set_pipe_csc(&crtc->base);
 
        /*
         * Update pipe size and adjust fitter if needed: the reason for this is
@@ -3326,27 +3329,24 @@ static void intel_update_pipe_size(struct intel_crtc *crtc)
         * fastboot case, we'll flip, but if we don't update the pipesrc and
         * pfit state, we'll end up with a big fb scanned out into the wrong
         * sized surface.
-        *
-        * To fix this properly, we need to hoist the checks up into
-        * compute_mode_changes (or above), check the actual pfit state and
-        * whether the platform allows pfit disable with pipe active, and only
-        * then update the pipesrc and pfit state, even on the flip path.
         */
 
-       adjusted_mode = &crtc->config->base.adjusted_mode;
-
        I915_WRITE(PIPESRC(crtc->pipe),
-                  ((adjusted_mode->crtc_hdisplay - 1) << 16) |
-                  (adjusted_mode->crtc_vdisplay - 1));
-       if (!crtc->config->pch_pfit.enabled &&
-           (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
-            intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
-               I915_WRITE(PF_CTL(crtc->pipe), 0);
-               I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
-               I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
+                  ((pipe_config->pipe_src_w - 1) << 16) |
+                  (pipe_config->pipe_src_h - 1));
+
+       /* on skylake this is done by detaching scalers */
+       if (INTEL_INFO(dev)->gen >= 9) {
+               skl_detach_scalers(crtc);
+
+               if (pipe_config->pch_pfit.enabled)
+                       skylake_pfit_enable(crtc);
+       } else if (HAS_PCH_SPLIT(dev)) {
+               if (pipe_config->pch_pfit.enabled)
+                       ironlake_pfit_enable(crtc);
+               else if (old_crtc_state->pch_pfit.enabled)
+                       ironlake_pfit_disable(crtc, true);
        }
-       crtc->config->pipe_src_w = adjusted_mode->crtc_hdisplay;
-       crtc->config->pipe_src_h = adjusted_mode->crtc_vdisplay;
 }
 
 static void intel_fdi_normal_train(struct drm_crtc *crtc)
@@ -4963,12 +4963,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 
        intel_ddi_enable_pipe_clock(intel_crtc);
 
-       if (INTEL_INFO(dev)->gen == 9)
+       if (INTEL_INFO(dev)->gen >= 9)
                skylake_pfit_enable(intel_crtc);
-       else if (INTEL_INFO(dev)->gen < 9)
-               ironlake_pfit_enable(intel_crtc);
        else
-               MISSING_CASE(INTEL_INFO(dev)->gen);
+               ironlake_pfit_enable(intel_crtc);
 
        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
@@ -5005,7 +5003,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
        }
 }
 
-static void ironlake_pfit_disable(struct intel_crtc *crtc)
+static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5013,7 +5011,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc)
 
        /* To avoid upsetting the power well on haswell only disable the pfit if
         * it's in use. The hw state code will make sure we get this right. */
-       if (crtc->config->pch_pfit.enabled) {
+       if (force || crtc->config->pch_pfit.enabled) {
                I915_WRITE(PF_CTL(pipe), 0);
                I915_WRITE(PF_WIN_POS(pipe), 0);
                I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -5040,7 +5038,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 
        intel_disable_pipe(intel_crtc);
 
-       ironlake_pfit_disable(intel_crtc);
+       ironlake_pfit_disable(intel_crtc, false);
 
        if (intel_crtc->config->has_pch_encoder)
                ironlake_fdi_disable(crtc);
@@ -5100,12 +5098,10 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 
        intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
-       if (INTEL_INFO(dev)->gen == 9)
+       if (INTEL_INFO(dev)->gen >= 9)
                skylake_scaler_disable(intel_crtc);
-       else if (INTEL_INFO(dev)->gen < 9)
-               ironlake_pfit_disable(intel_crtc);
        else
-               MISSING_CASE(INTEL_INFO(dev)->gen);
+               ironlake_pfit_disable(intel_crtc, false);
 
        intel_ddi_disable_pipe_clock(intel_crtc);
 
@@ -5277,6 +5273,21 @@ static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
                        modeset_put_power_domains(dev_priv, put_domains[i]);
 }
 
+static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
+{
+       int max_cdclk_freq = dev_priv->max_cdclk_freq;
+
+       if (INTEL_INFO(dev_priv)->gen >= 9 ||
+           IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+               return max_cdclk_freq;
+       else if (IS_CHERRYVIEW(dev_priv))
+               return max_cdclk_freq*95/100;
+       else if (INTEL_INFO(dev_priv)->gen < 4)
+               return 2*max_cdclk_freq*90/100;
+       else
+               return max_cdclk_freq*90/100;
+}
+
 static void intel_update_max_cdclk(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5316,8 +5327,13 @@ static void intel_update_max_cdclk(struct drm_device *dev)
                dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
        }
 
+       dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
+
        DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
                         dev_priv->max_cdclk_freq);
+
+       DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
+                        dev_priv->max_dotclk_freq);
 }
 
 static void intel_update_cdclk(struct drm_device *dev)
@@ -6035,13 +6051,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 
        is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
 
-       if (!is_dsi) {
-               if (IS_CHERRYVIEW(dev))
-                       chv_prepare_pll(intel_crtc, intel_crtc->config);
-               else
-                       vlv_prepare_pll(intel_crtc, intel_crtc->config);
-       }
-
        if (intel_crtc->config->has_dp_encoder)
                intel_dp_set_m_n(intel_crtc, M1_N1);
 
@@ -6065,10 +6074,13 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
                        encoder->pre_pll_enable(encoder);
 
        if (!is_dsi) {
-               if (IS_CHERRYVIEW(dev))
+               if (IS_CHERRYVIEW(dev)) {
+                       chv_prepare_pll(intel_crtc, intel_crtc->config);
                        chv_enable_pll(intel_crtc, intel_crtc->config);
-               else
+               } else {
+                       vlv_prepare_pll(intel_crtc, intel_crtc->config);
                        vlv_enable_pll(intel_crtc, intel_crtc->config);
+               }
        }
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -6196,6 +6208,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
                        i9xx_disable_pll(intel_crtc);
        }
 
+       for_each_encoder_on_crtc(dev, crtc, encoder)
+               if (encoder->post_pll_disable)
+                       encoder->post_pll_disable(encoder);
+
        if (!IS_GEN2(dev))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
@@ -7377,8 +7393,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
                        1 << DPIO_CHV_N_DIV_SHIFT);
 
        /* M2 fraction division */
-       if (bestm2_frac)
-               vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
+       vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
 
        /* M2 fraction division enable */
        dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
@@ -8119,6 +8134,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
        else
                i9xx_crtc_clock_get(crtc, pipe_config);
 
+       /*
+        * Normally the dotclock is filled in by the encoder .get_config()
+        * but in case the pipe is enabled w/o any ports we need a sane
+        * default.
+        */
+       pipe_config->base.adjusted_mode.crtc_clock =
+               pipe_config->port_clock / pipe_config->pixel_multiplier;
+
        return true;
 }
 
@@ -8380,8 +8403,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
 
        if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
                with_spread = true;
-       if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
-                with_fdi, "LP PCH doesn't have FDI\n"))
+       if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
                with_fdi = false;
 
        mutex_lock(&dev_priv->sb_lock);
@@ -8404,8 +8426,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
                }
        }
 
-       reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
-              SBI_GEN0 : SBI_DBUFF0;
+       reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -8421,8 +8442,7 @@ static void lpt_disable_clkout_dp(struct drm_device *dev)
 
        mutex_lock(&dev_priv->sb_lock);
 
-       reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
-              SBI_GEN0 : SBI_DBUFF0;
+       reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
@@ -9434,7 +9454,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
 
        DRM_DEBUG_KMS("Enabling package C8+\n");
 
-       if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+       if (HAS_PCH_LPT_LP(dev)) {
                val = I915_READ(SOUTH_DSPCLK_GATE_D);
                val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
                I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -9454,7 +9474,7 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
        hsw_restore_lcpll(dev_priv);
        lpt_init_pch_refclk(dev);
 
-       if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+       if (HAS_PCH_LPT_LP(dev)) {
                val = I915_READ(SOUTH_DSPCLK_GATE_D);
                val |= PCH_LP_PARTITION_LEVEL_DISABLE;
                I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -9804,12 +9824,10 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
        }
 
        if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
-               if (INTEL_INFO(dev)->gen == 9)
+               if (INTEL_INFO(dev)->gen >= 9)
                        skylake_get_pfit_config(crtc, pipe_config);
-               else if (INTEL_INFO(dev)->gen < 9)
-                       ironlake_get_pfit_config(crtc, pipe_config);
                else
-                       MISSING_CASE(INTEL_INFO(dev)->gen);
+                       ironlake_get_pfit_config(crtc, pipe_config);
        }
 
        if (IS_HASWELL(dev))
@@ -9943,8 +9961,9 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
-       int x = crtc->cursor_x;
-       int y = crtc->cursor_y;
+       struct drm_plane_state *cursor_state = crtc->cursor->state;
+       int x = cursor_state->crtc_x;
+       int y = cursor_state->crtc_y;
        u32 base = 0, pos = 0;
 
        if (on)
@@ -9957,7 +9976,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
                base = 0;
 
        if (x < 0) {
-               if (x + intel_crtc->base.cursor->state->crtc_w <= 0)
+               if (x + cursor_state->crtc_w <= 0)
                        base = 0;
 
                pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
@@ -9966,7 +9985,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
        pos |= x << CURSOR_X_SHIFT;
 
        if (y < 0) {
-               if (y + intel_crtc->base.cursor->state->crtc_h <= 0)
+               if (y + cursor_state->crtc_h <= 0)
                        base = 0;
 
                pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
@@ -9982,8 +10001,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
        /* ILK+ do this automagically */
        if (HAS_GMCH_DISPLAY(dev) &&
            crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
-               base += (intel_crtc->base.cursor->state->crtc_h *
-                       intel_crtc->base.cursor->state->crtc_w - 1) * 4;
+               base += (cursor_state->crtc_h *
+                        cursor_state->crtc_w - 1) * 4;
        }
 
        if (IS_845G(dev) || IS_I865G(dev))
@@ -11034,10 +11053,10 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
                                        DERRMR_PIPEB_PRI_FLIP_DONE |
                                        DERRMR_PIPEC_PRI_FLIP_DONE));
                if (IS_GEN8(dev))
-                       intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
+                       intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
                                              MI_SRM_LRM_GLOBAL_GTT);
                else
-                       intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
+                       intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
                                              MI_SRM_LRM_GLOBAL_GTT);
                intel_ring_emit(ring, DERRMR);
                intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
@@ -11161,11 +11180,10 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc)
 static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       u32 start_vbl_count;
 
        intel_mark_page_flip_active(intel_crtc);
 
-       intel_pipe_update_start(intel_crtc, &start_vbl_count);
+       intel_pipe_update_start(intel_crtc);
 
        if (INTEL_INFO(dev)->gen >= 9)
                skl_do_mmio_flip(intel_crtc);
@@ -11173,7 +11191,7 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
                /* use_mmio_flip() retricts MMIO flips to ilk+ */
                ilk_do_mmio_flip(intel_crtc);
 
-       intel_pipe_update_end(intel_crtc, start_vbl_count);
+       intel_pipe_update_end(intel_crtc);
 }
 
 static void intel_mmio_flip_work_func(struct work_struct *work)
@@ -11237,6 +11255,9 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
        if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
                return true;
 
+       if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
+               return false;
+
        if (!work->enable_stall_check)
                return false;
 
@@ -11417,8 +11438,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        if (ret)
                goto cleanup_pending;
 
-       work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj)
-                                                 + intel_crtc->dspaddr_offset;
+       work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
+                                                 obj, 0);
+       work->gtt_offset += intel_crtc->dspaddr_offset;
 
        if (mmio_flip) {
                ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
@@ -11627,7 +11649,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
                intel_crtc->atomic.update_wm_pre = true;
        }
 
-       if (visible)
+       if (visible || was_visible)
                intel_crtc->atomic.fb_bits |=
                        to_intel_plane(plane)->frontbuffer_bit;
 
@@ -11900,14 +11922,16 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
                      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
                      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
                      pipe_config->fdi_m_n.tu);
-       DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+       DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
                      pipe_config->has_dp_encoder,
+                     pipe_config->lane_count,
                      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
                      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
                      pipe_config->dp_m_n.tu);
 
-       DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
+       DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
                      pipe_config->has_dp_encoder,
+                     pipe_config->lane_count,
                      pipe_config->dp_m2_n2.gmch_m,
                      pipe_config->dp_m2_n2.gmch_n,
                      pipe_config->dp_m2_n2.link_m,
@@ -12119,10 +12143,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
              (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
                pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
 
-       /* Compute a starting value for pipe_config->pipe_bpp taking the source
-        * plane pixel format and any sink constraints into account. Returns the
-        * source plane bpp so that dithering can be selected on mismatches
-        * after encoders and crtc also have had their say. */
        base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
                                             pipe_config);
        if (base_bpp < 0)
@@ -12191,7 +12211,7 @@ encoder_retry:
        /* Dithering seems to not pass-through bits correctly when it should, so
         * only enable it on 6bpc panels. */
        pipe_config->dither = pipe_config->pipe_bpp == 6*3;
-       DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
+       DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
                      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
 
 fail:
@@ -12241,7 +12261,6 @@ static bool intel_fuzzy_clock_check(int clock1, int clock2)
                            base.head) \
                if (mask & (1 <<(intel_crtc)->pipe))
 
-
 static bool
 intel_compare_m_n(unsigned int m, unsigned int n,
                  unsigned int m2, unsigned int n2,
@@ -12414,6 +12433,7 @@ intel_pipe_config_compare(struct drm_device *dev,
        PIPE_CONF_CHECK_M_N(fdi_m_n);
 
        PIPE_CONF_CHECK_I(has_dp_encoder);
+       PIPE_CONF_CHECK_I(lane_count);
 
        if (INTEL_INFO(dev)->gen < 8) {
                PIPE_CONF_CHECK_M_N(dp_m_n);
@@ -12461,22 +12481,24 @@ intel_pipe_config_compare(struct drm_device *dev,
                                      DRM_MODE_FLAG_NVSYNC);
        }
 
-       PIPE_CONF_CHECK_I(pipe_src_w);
-       PIPE_CONF_CHECK_I(pipe_src_h);
-
-       PIPE_CONF_CHECK_I(gmch_pfit.control);
+       PIPE_CONF_CHECK_X(gmch_pfit.control);
        /* pfit ratios are autocomputed by the hw on gen4+ */
        if (INTEL_INFO(dev)->gen < 4)
                PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
-       PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+       PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
 
-       PIPE_CONF_CHECK_I(pch_pfit.enabled);
-       if (current_config->pch_pfit.enabled) {
-               PIPE_CONF_CHECK_I(pch_pfit.pos);
-               PIPE_CONF_CHECK_I(pch_pfit.size);
-       }
+       if (!adjust) {
+               PIPE_CONF_CHECK_I(pipe_src_w);
+               PIPE_CONF_CHECK_I(pipe_src_h);
+
+               PIPE_CONF_CHECK_I(pch_pfit.enabled);
+               if (current_config->pch_pfit.enabled) {
+                       PIPE_CONF_CHECK_X(pch_pfit.pos);
+                       PIPE_CONF_CHECK_X(pch_pfit.size);
+               }
 
-       PIPE_CONF_CHECK_I(scaler_state.scaler_id);
+               PIPE_CONF_CHECK_I(scaler_state.scaler_id);
+       }
 
        /* BDW+ don't expose a synchronous way to read the state */
        if (IS_HASWELL(dev))
@@ -12638,7 +12660,8 @@ check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
                struct intel_crtc_state *pipe_config, *sw_config;
                bool active;
 
-               if (!needs_modeset(crtc->state))
+               if (!needs_modeset(crtc->state) &&
+                   !to_intel_crtc_state(crtc->state)->update_pipe)
                        continue;
 
                __drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
@@ -12934,7 +12957,6 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state)
        return ret;
 }
 
-
 static int intel_modeset_checks(struct drm_atomic_state *state)
 {
        struct drm_device *dev = state->dev;
@@ -13020,11 +13042,11 @@ static int intel_atomic_check(struct drm_device *dev,
                if (ret)
                        return ret;
 
-               if (i915.fastboot &&
-                   intel_pipe_config_compare(state->dev,
+               if (intel_pipe_config_compare(state->dev,
                                        to_intel_crtc_state(crtc->state),
                                        pipe_config, true)) {
                        crtc_state->mode_changed = false;
+                       to_intel_crtc_state(crtc_state)->update_pipe = true;
                }
 
                if (needs_modeset(crtc_state)) {
@@ -13122,16 +13144,30 @@ static int intel_atomic_commit(struct drm_device *dev,
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
                bool modeset = needs_modeset(crtc->state);
+               bool update_pipe = !modeset &&
+                       to_intel_crtc_state(crtc->state)->update_pipe;
+               unsigned long put_domains = 0;
 
                if (modeset && crtc->state->active) {
                        update_scanline_offset(to_intel_crtc(crtc));
                        dev_priv->display.crtc_enable(crtc);
                }
 
+               if (update_pipe) {
+                       put_domains = modeset_get_crtc_power_domains(crtc);
+
+                       /* make sure intel_modeset_check_state runs */
+                       any_ms = true;
+               }
+
                if (!modeset)
                        intel_pre_plane_update(intel_crtc);
 
                drm_atomic_helper_commit_planes_on_crtc(crtc_state);
+
+               if (put_domains)
+                       modeset_put_power_domains(dev_priv, put_domains);
+
                intel_post_plane_update(intel_crtc);
        }
 
@@ -13313,10 +13349,10 @@ static void intel_shared_dpll_init(struct drm_device *dev)
  */
 int
 intel_prepare_plane_fb(struct drm_plane *plane,
-                      struct drm_framebuffer *fb,
                       const struct drm_plane_state *new_state)
 {
        struct drm_device *dev = plane->dev;
+       struct drm_framebuffer *fb = new_state->fb;
        struct intel_plane *intel_plane = to_intel_plane(plane);
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
@@ -13354,19 +13390,18 @@ intel_prepare_plane_fb(struct drm_plane *plane,
  */
 void
 intel_cleanup_plane_fb(struct drm_plane *plane,
-                      struct drm_framebuffer *fb,
                       const struct drm_plane_state *old_state)
 {
        struct drm_device *dev = plane->dev;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       struct drm_i915_gem_object *obj = intel_fb_obj(old_state->fb);
 
-       if (WARN_ON(!obj))
+       if (!obj)
                return;
 
        if (plane->type != DRM_PLANE_TYPE_CURSOR ||
            !INTEL_INFO(dev)->cursor_needs_physical) {
                mutex_lock(&dev->struct_mutex);
-               intel_unpin_fb_obj(fb, old_state);
+               intel_unpin_fb_obj(old_state->fb, old_state);
                mutex_unlock(&dev->struct_mutex);
        }
 }
@@ -13448,11 +13483,9 @@ intel_commit_primary_plane(struct drm_plane *plane,
        if (!crtc->state->active)
                return;
 
-       if (state->visible)
-               /* FIXME: kill this fastboot hack */
-               intel_update_pipe_size(intel_crtc);
-
-       dev_priv->display.update_primary_plane(crtc, fb, crtc->x, crtc->y);
+       dev_priv->display.update_primary_plane(crtc, fb,
+                                              state->src.x1 >> 16,
+                                              state->src.y1 >> 16);
 }
 
 static void
@@ -13470,15 +13503,23 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
 {
        struct drm_device *dev = crtc->dev;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_crtc_state *old_intel_state =
+               to_intel_crtc_state(old_crtc_state);
+       bool modeset = needs_modeset(crtc->state);
 
        if (intel_crtc->atomic.update_wm_pre)
                intel_update_watermarks(crtc);
 
        /* Perform vblank evasion around commit operation */
        if (crtc->state->active)
-               intel_pipe_update_start(intel_crtc, &intel_crtc->start_vbl_count);
+               intel_pipe_update_start(intel_crtc);
+
+       if (modeset)
+               return;
 
-       if (!needs_modeset(crtc->state) && INTEL_INFO(dev)->gen >= 9)
+       if (to_intel_crtc_state(crtc->state)->update_pipe)
+               intel_update_pipe_config(intel_crtc, old_intel_state);
+       else if (INTEL_INFO(dev)->gen >= 9)
                skl_detach_scalers(intel_crtc);
 }
 
@@ -13488,7 +13529,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
        if (crtc->state->active)
-               intel_pipe_update_end(intel_crtc, intel_crtc->start_vbl_count);
+               intel_pipe_update_end(intel_crtc);
 }
 
 /**
@@ -13657,10 +13698,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
        crtc = crtc ? crtc : plane->crtc;
        intel_crtc = to_intel_crtc(crtc);
 
-       plane->fb = state->base.fb;
-       crtc->cursor_x = state->base.crtc_x;
-       crtc->cursor_y = state->base.crtc_y;
-
        if (intel_crtc->cursor_bo == obj)
                goto update;
 
@@ -14799,8 +14836,6 @@ void intel_modeset_init(struct drm_device *dev)
                }
        }
 
-       intel_init_dpio(dev);
-
        intel_shared_dpll_init(dev);
 
        /* Just disable it once at startup */
@@ -14882,13 +14917,22 @@ intel_check_plane_mapping(struct intel_crtc *crtc)
        return true;
 }
 
+static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct intel_encoder *encoder;
+
+       for_each_encoder_on_crtc(dev, &crtc->base, encoder)
+               return true;
+
+       return false;
+}
+
 static void intel_sanitize_crtc(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *encoder;
        u32 reg;
-       bool enable;
 
        /* Clear any frame start delays used for debugging left by the BIOS */
        reg = PIPECONF(crtc->config->cpu_transcoder);
@@ -14897,9 +14941,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
        /* restore vblank interrupts to correct state */
        drm_crtc_vblank_reset(&crtc->base);
        if (crtc->active) {
-               drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
-               update_scanline_offset(crtc);
+               struct intel_plane *plane;
+
                drm_crtc_vblank_on(&crtc->base);
+
+               /* Disable everything but the primary plane */
+               for_each_intel_plane_on_crtc(dev, crtc, plane) {
+                       if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+                               continue;
+
+                       plane->disable_plane(&plane->base, &crtc->base);
+               }
        }
 
        /* We need to sanitize the plane -> pipe mapping first because this will
@@ -14932,16 +14984,11 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 
        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. */
-       enable = false;
-       for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
-               enable = true;
-               break;
-       }
-
-       if (!enable)
+       if (!intel_crtc_has_encoders(crtc))
                intel_crtc_disable_noatomic(&crtc->base);
 
        if (crtc->active != crtc->base.state->active) {
+               struct intel_encoder *encoder;
 
                /* This can happen either due to bugs in the get_hw_state
                 * functions or because of calls to intel_crtc_disable_noatomic,
@@ -15067,38 +15114,21 @@ void i915_redisable_vga(struct drm_device *dev)
        i915_redisable_vga_power_on(dev);
 }
 
-static bool primary_get_hw_state(struct intel_crtc *crtc)
+static bool primary_get_hw_state(struct intel_plane *plane)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 
-       return !!(I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE);
+       return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
 }
 
-static void readout_plane_state(struct intel_crtc *crtc,
-                               struct intel_crtc_state *crtc_state)
+/* FIXME read out full plane state for all planes */
+static void readout_plane_state(struct intel_crtc *crtc)
 {
-       struct intel_plane *p;
-       struct intel_plane_state *plane_state;
-       bool active = crtc_state->base.active;
-
-       for_each_intel_plane(crtc->base.dev, p) {
-               if (crtc->pipe != p->pipe)
-                       continue;
-
-               plane_state = to_intel_plane_state(p->base.state);
+       struct intel_plane_state *plane_state =
+               to_intel_plane_state(crtc->base.primary->state);
 
-               if (p->base.type == DRM_PLANE_TYPE_PRIMARY) {
-                       plane_state->visible = primary_get_hw_state(crtc);
-                       if (plane_state->visible)
-                               crtc->base.state->plane_mask |=
-                                       1 << drm_plane_index(&p->base);
-               } else {
-                       if (active)
-                               p->disable_plane(&p->base, &crtc->base);
-
-                       plane_state->visible = false;
-               }
-       }
+       plane_state->visible =
+               primary_get_hw_state(to_intel_plane(crtc->base.primary));
 }
 
 static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -15121,34 +15151,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                crtc->base.state->active = crtc->active;
                crtc->base.enabled = crtc->active;
 
-               memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
-               if (crtc->base.state->active) {
-                       intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
-                       intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
-                       WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
-
-                       /*
-                        * The initial mode needs to be set in order to keep
-                        * the atomic core happy. It wants a valid mode if the
-                        * crtc's enabled, so we do the above call.
-                        *
-                        * At this point some state updated by the connectors
-                        * in their ->detect() callback has not run yet, so
-                        * no recalculation can be done yet.
-                        *
-                        * Even if we could do a recalculation and modeset
-                        * right now it would cause a double modeset if
-                        * fbdev or userspace chooses a different initial mode.
-                        *
-                        * If that happens, someone indicated they wanted a
-                        * mode change, which means it's safe to do a full
-                        * recalculation.
-                        */
-                       crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
-               }
-
-               crtc->base.hwmode = crtc->config->base.adjusted_mode;
-               readout_plane_state(crtc, to_intel_crtc_state(crtc->base.state));
+               readout_plane_state(crtc);
 
                DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
                              crtc->base.base.id,
@@ -15207,6 +15210,39 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                              connector->base.name,
                              connector->base.encoder ? "enabled" : "disabled");
        }
+
+       for_each_intel_crtc(dev, crtc) {
+               crtc->base.hwmode = crtc->config->base.adjusted_mode;
+
+               memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
+               if (crtc->base.state->active) {
+                       intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
+                       intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
+                       WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
+
+                       /*
+                        * The initial mode needs to be set in order to keep
+                        * the atomic core happy. It wants a valid mode if the
+                        * crtc's enabled, so we do the above call.
+                        *
+                        * At this point some state updated by the connectors
+                        * in their ->detect() callback has not run yet, so
+                        * no recalculation can be done yet.
+                        *
+                        * Even if we could do a recalculation and modeset
+                        * right now it would cause a double modeset if
+                        * fbdev or userspace chooses a different initial mode.
+                        *
+                        * If that happens, someone indicated they wanted a
+                        * mode change, which means it's safe to do a full
+                        * recalculation.
+                        */
+                       crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
+
+                       drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
+                       update_scanline_offset(crtc);
+               }
+       }
 }
 
 /* Scan out the current hw modeset state,
index 0a2e33fbf20dd2902817d85c10aa6b402d40e869..06a2b1046daf73bf06a3bed1d66a57d9f1750b4e 100644 (file)
@@ -130,6 +130,11 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
 static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe);
 
+static unsigned int intel_dp_unused_lane_mask(int lane_count)
+{
+       return ~((1 << lane_count) - 1) & 0xf;
+}
+
 static int
 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
 {
@@ -253,40 +258,6 @@ static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
                dst[i] = src >> ((3-i) * 8);
 }
 
-/* hrawclock is 1/4 the FSB frequency */
-static int
-intel_hrawclk(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t clkcfg;
-
-       /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
-       if (IS_VALLEYVIEW(dev))
-               return 200;
-
-       clkcfg = I915_READ(CLKCFG);
-       switch (clkcfg & CLKCFG_FSB_MASK) {
-       case CLKCFG_FSB_400:
-               return 100;
-       case CLKCFG_FSB_533:
-               return 133;
-       case CLKCFG_FSB_667:
-               return 166;
-       case CLKCFG_FSB_800:
-               return 200;
-       case CLKCFG_FSB_1067:
-               return 266;
-       case CLKCFG_FSB_1333:
-               return 333;
-       /* these two are just a guess; one of them might be right */
-       case CLKCFG_FSB_1600:
-       case CLKCFG_FSB_1600_ALT:
-               return 400;
-       default:
-               return 133;
-       }
-}
-
 static void
 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp);
@@ -333,7 +304,9 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe = intel_dp->pps_pipe;
-       bool pll_enabled;
+       bool pll_enabled, release_cl_override = false;
+       enum dpio_phy phy = DPIO_PHY(pipe);
+       enum dpio_channel ch = vlv_pipe_to_channel(pipe);
        uint32_t DP;
 
        if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
@@ -363,9 +336,13 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
         * The DPLL for the pipe must be enabled for this to work.
         * So enable temporarily it if it's not already enabled.
         */
-       if (!pll_enabled)
+       if (!pll_enabled) {
+               release_cl_override = IS_CHERRYVIEW(dev) &&
+                       !chv_phy_powergate_ch(dev_priv, phy, ch, true);
+
                vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
                                 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
+       }
 
        /*
         * Similar magic as in intel_dp_enable_port().
@@ -382,8 +359,12 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
        POSTING_READ(intel_dp->output_reg);
 
-       if (!pll_enabled)
+       if (!pll_enabled) {
                vlv_force_pll_off(dev, pipe);
+
+               if (release_cl_override)
+                       chv_phy_powergate_ch(dev_priv, phy, ch, false);
+       }
 }
 
 static enum pipe
@@ -974,6 +955,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
        switch (msg->request & ~DP_AUX_I2C_MOT) {
        case DP_AUX_NATIVE_WRITE:
        case DP_AUX_I2C_WRITE:
+       case DP_AUX_I2C_WRITE_STATUS_UPDATE:
                txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
                rxsize = 2; /* 0 or 1 data bytes */
 
@@ -1383,6 +1365,19 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
        return rate_to_index(rate, intel_dp->sink_rates);
 }
 
+static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
+                                 uint8_t *link_bw, uint8_t *rate_select)
+{
+       if (intel_dp->num_sink_rates) {
+               *link_bw = 0;
+               *rate_select =
+                       intel_dp_rate_select(intel_dp, port_clock);
+       } else {
+               *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
+               *rate_select = 0;
+       }
+}
+
 bool
 intel_dp_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_state *pipe_config)
@@ -1404,6 +1399,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        int link_avail, link_clock;
        int common_rates[DP_MAX_SUPPORTED_RATES] = {};
        int common_len;
+       uint8_t link_bw, rate_select;
 
        common_len = intel_dp_common_rates(intel_dp, common_rates);
 
@@ -1499,32 +1495,23 @@ found:
                 * CEA-861-E - 5.1 Default Encoding Parameters
                 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
                 */
-               if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
-                       intel_dp->color_range = DP_COLOR_RANGE_16_235;
-               else
-                       intel_dp->color_range = 0;
-       }
-
-       if (intel_dp->color_range)
-               pipe_config->limited_color_range = true;
-
-       intel_dp->lane_count = lane_count;
-
-       if (intel_dp->num_sink_rates) {
-               intel_dp->link_bw = 0;
-               intel_dp->rate_select =
-                       intel_dp_rate_select(intel_dp, common_rates[clock]);
+               pipe_config->limited_color_range =
+                       bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
        } else {
-               intel_dp->link_bw =
-                       drm_dp_link_rate_to_bw_code(common_rates[clock]);
-               intel_dp->rate_select = 0;
+               pipe_config->limited_color_range =
+                       intel_dp->limited_color_range;
        }
 
+       pipe_config->lane_count = lane_count;
+
        pipe_config->pipe_bpp = bpp;
        pipe_config->port_clock = common_rates[clock];
 
-       DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
-                     intel_dp->link_bw, intel_dp->lane_count,
+       intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
+                             &link_bw, &rate_select);
+
+       DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
+                     link_bw, rate_select, pipe_config->lane_count,
                      pipe_config->port_clock, bpp);
        DRM_DEBUG_KMS("DP link bw required %i available %i\n",
                      mode_rate, link_avail);
@@ -1586,6 +1573,13 @@ static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
        udelay(500);
 }
 
+void intel_dp_set_link_params(struct intel_dp *intel_dp,
+                             const struct intel_crtc_state *pipe_config)
+{
+       intel_dp->link_rate = pipe_config->port_clock;
+       intel_dp->lane_count = pipe_config->lane_count;
+}
+
 static void intel_dp_prepare(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
@@ -1595,6 +1589,8 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
 
+       intel_dp_set_link_params(intel_dp, crtc->config);
+
        /*
         * There are four kinds of DP registers:
         *
@@ -1619,7 +1615,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
 
        /* Handle DP bits in common between all three register formats */
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
-       intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
+       intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
 
        if (crtc->config->has_audio)
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
@@ -1649,8 +1645,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
                        trans_dp &= ~TRANS_DP_ENH_FRAMING;
                I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
        } else {
-               if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
-                       intel_dp->DP |= intel_dp->color_range;
+               if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
+                   crtc->config->limited_color_range)
+                       intel_dp->DP |= DP_COLOR_RANGE_16_235;
 
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
@@ -2290,13 +2287,14 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
        pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
 
        if (HAS_PCH_CPT(dev) && port != PORT_A) {
-               tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
-               if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
+               u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
+
+               if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;
 
-               if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
+               if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
@@ -2320,6 +2318,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 
        pipe_config->has_dp_encoder = true;
 
+       pipe_config->lane_count =
+               ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
+
        intel_dp_get_m_n(crtc, pipe_config);
 
        if (port == PORT_A) {
@@ -2399,38 +2400,62 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder)
        intel_dp_link_down(intel_dp);
 }
 
-static void chv_post_disable_dp(struct intel_encoder *encoder)
+static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+                                    bool reset)
 {
-       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc =
-               to_intel_crtc(encoder->base.crtc);
-       enum dpio_channel ch = vlv_dport_to_channel(dport);
-       enum pipe pipe = intel_crtc->pipe;
-       u32 val;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+       enum pipe pipe = crtc->pipe;
+       uint32_t val;
 
-       intel_dp_link_down(intel_dp);
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+       if (reset)
+               val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+       else
+               val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
 
-       mutex_lock(&dev_priv->sb_lock);
+       if (crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+               if (reset)
+                       val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+               else
+                       val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+       }
 
-       /* Propagate soft reset to data lane reset */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
        val |= CHV_PCS_REQ_SOFTRESET_EN;
+       if (reset)
+               val &= ~DPIO_PCS_CLK_SOFT_RESET;
+       else
+               val |= DPIO_PCS_CLK_SOFT_RESET;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
 
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
-       val |= CHV_PCS_REQ_SOFTRESET_EN;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+       if (crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+               val |= CHV_PCS_REQ_SOFTRESET_EN;
+               if (reset)
+                       val &= ~DPIO_PCS_CLK_SOFT_RESET;
+               else
+                       val |= DPIO_PCS_CLK_SOFT_RESET;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+       }
+}
 
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
-       val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+static void chv_post_disable_dp(struct intel_encoder *encoder)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
-       val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+       intel_dp_link_down(intel_dp);
+
+       mutex_lock(&dev_priv->sb_lock);
+
+       /* Assert data lane reset */
+       chv_data_lane_soft_reset(encoder, true);
 
        mutex_unlock(&dev_priv->sb_lock);
 }
@@ -2550,7 +2575,6 @@ static void intel_enable_dp(struct intel_encoder *encoder)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);
-       unsigned int lane_mask = 0x0;
 
        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;
@@ -2568,9 +2592,15 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 
        pps_unlock(intel_dp);
 
-       if (IS_VALLEYVIEW(dev))
+       if (IS_VALLEYVIEW(dev)) {
+               unsigned int lane_mask = 0x0;
+
+               if (IS_CHERRYVIEW(dev))
+                       lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
+
                vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
                                    lane_mask);
+       }
 
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
@@ -2797,31 +2827,19 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
 
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
-       val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
-
-       /* Deassert soft data lane reset*/
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
-       val |= CHV_PCS_REQ_SOFTRESET_EN;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
-       val |= CHV_PCS_REQ_SOFTRESET_EN;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
-       val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
-       val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+       if (intel_crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+               val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+       }
 
        /* Program Tx lane latency optimal setting*/
-       for (i = 0; i < 4; i++) {
+       for (i = 0; i < intel_crtc->config->lane_count; i++) {
                /* Set the upar bit */
-               data = (i == 1) ? 0x0 : 0x1;
+               if (intel_crtc->config->lane_count == 1)
+                       data = 0x0;
+               else
+                       data = (i == 1) ? 0x0 : 0x1;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
                                data << DPIO_UPAR_SHIFT);
        }
@@ -2842,9 +2860,11 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
        val |= DPIO_TX2_STAGGER_MASK(0x1f);
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
 
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
-       val |= DPIO_TX2_STAGGER_MASK(0x1f);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+       if (intel_crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+               val |= DPIO_TX2_STAGGER_MASK(0x1f);
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+       }
 
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
                       DPIO_LANESTAGGER_STRAP(stagger) |
@@ -2853,16 +2873,27 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
                       DPIO_TX1_STAGGER_MULT(6) |
                       DPIO_TX2_STAGGER_MULT(0));
 
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
-                      DPIO_LANESTAGGER_STRAP(stagger) |
-                      DPIO_LANESTAGGER_STRAP_OVRD |
-                      DPIO_TX1_STAGGER_MASK(0x1f) |
-                      DPIO_TX1_STAGGER_MULT(7) |
-                      DPIO_TX2_STAGGER_MULT(5));
+       if (intel_crtc->config->lane_count > 2) {
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
+                              DPIO_LANESTAGGER_STRAP(stagger) |
+                              DPIO_LANESTAGGER_STRAP_OVRD |
+                              DPIO_TX1_STAGGER_MASK(0x1f) |
+                              DPIO_TX1_STAGGER_MULT(7) |
+                              DPIO_TX2_STAGGER_MULT(5));
+       }
+
+       /* Deassert data lane reset */
+       chv_data_lane_soft_reset(encoder, false);
 
        mutex_unlock(&dev_priv->sb_lock);
 
        intel_enable_dp(encoder);
+
+       /* Second common lane will stay alive on its own now */
+       if (dport->release_cl2_override) {
+               chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
+               dport->release_cl2_override = false;
+       }
 }
 
 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -2874,12 +2905,27 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
                to_intel_crtc(encoder->base.crtc);
        enum dpio_channel ch = vlv_dport_to_channel(dport);
        enum pipe pipe = intel_crtc->pipe;
+       unsigned int lane_mask =
+               intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
        u32 val;
 
        intel_dp_prepare(encoder);
 
+       /*
+        * Must trick the second common lane into life.
+        * Otherwise we can't even access the PLL.
+        */
+       if (ch == DPIO_CH0 && pipe == PIPE_B)
+               dport->release_cl2_override =
+                       !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
+
+       chv_phy_powergate_lanes(encoder, true, lane_mask);
+
        mutex_lock(&dev_priv->sb_lock);
 
+       /* Assert data lane reset */
+       chv_data_lane_soft_reset(encoder, true);
+
        /* program left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
@@ -2908,13 +2954,15 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
                val |= CHV_PCS_USEDCLKCHANNEL;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
 
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
-       val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
-       if (pipe != PIPE_B)
-               val &= ~CHV_PCS_USEDCLKCHANNEL;
-       else
-               val |= CHV_PCS_USEDCLKCHANNEL;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+       if (intel_crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+               val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+               if (pipe != PIPE_B)
+                       val &= ~CHV_PCS_USEDCLKCHANNEL;
+               else
+                       val |= CHV_PCS_USEDCLKCHANNEL;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+       }
 
        /*
         * This a a bit weird since generally CL
@@ -2931,6 +2979,39 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
        mutex_unlock(&dev_priv->sb_lock);
 }
 
+static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+       u32 val;
+
+       mutex_lock(&dev_priv->sb_lock);
+
+       /* disable left/right clock distribution */
+       if (pipe != PIPE_B) {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+               val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+       } else {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+               val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+       }
+
+       mutex_unlock(&dev_priv->sb_lock);
+
+       /*
+        * Leave the power down bit cleared for at least one
+        * lane so that chv_phy_powergate_ch() will power
+        * on something when the channel is otherwise unused.
+        * When the port is off and the override is removed
+        * the lanes power down anyway, so otherwise it doesn't
+        * really matter what the state of power down bits is
+        * after this.
+        */
+       chv_phy_powergate_lanes(encoder, false, 0x0);
+}
+
 /*
  * Native read with retry for link status and receiver capability reads for
  * cases where the sink may still be asleep.
@@ -3167,6 +3248,12 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
        return 0;
 }
 
+static bool chv_need_uniq_trans_scale(uint8_t train_set)
+{
+       return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
+               (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+}
+
 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -3258,24 +3345,28 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
        val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
 
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
-       val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
-       val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
-       val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+       if (intel_crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+               val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+               val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+               val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+       }
 
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
        val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
        val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
 
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
-       val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
-       val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+       if (intel_crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+               val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+               val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+       }
 
        /* Program swing deemph */
-       for (i = 0; i < 4; i++) {
+       for (i = 0; i < intel_crtc->config->lane_count; i++) {
                val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
                val &= ~DPIO_SWING_DEEMPH9P5_MASK;
                val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
@@ -3283,43 +3374,36 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
        }
 
        /* Program swing margin */
-       for (i = 0; i < 4; i++) {
+       for (i = 0; i < intel_crtc->config->lane_count; i++) {
                val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+
                val &= ~DPIO_SWING_MARGIN000_MASK;
                val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
+
+               /*
+                * Supposedly this value shouldn't matter when unique transition
+                * scale is disabled, but in fact it does matter. Let's just
+                * always program the same value and hope it's OK.
+                */
+               val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+               val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
+
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
        }
 
-       /* Disable unique transition scale */
-       for (i = 0; i < 4; i++) {
+       /*
+        * The document said it needs to set bit 27 for ch0 and bit 26
+        * for ch1. Might be a typo in the doc.
+        * For now, for this unique transition scale selection, set bit
+        * 27 for ch0 and ch1.
+        */
+       for (i = 0; i < intel_crtc->config->lane_count; i++) {
                val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
-               val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
-               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
-       }
-
-       if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
-                       == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
-               ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
-                       == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
-
-               /*
-                * The document said it needs to set bit 27 for ch0 and bit 26
-                * for ch1. Might be a typo in the doc.
-                * For now, for this unique transition scale selection, set bit
-                * 27 for ch0 and ch1.
-                */
-               for (i = 0; i < 4; i++) {
-                       val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+               if (chv_need_uniq_trans_scale(train_set))
                        val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
-                       vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
-               }
-
-               for (i = 0; i < 4; i++) {
-                       val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
-                       val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
-                       val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
-                       vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
-               }
+               else
+                       val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
+               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
        }
 
        /* Start swing calculation */
@@ -3327,9 +3411,11 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
        val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
 
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
-       val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+       if (intel_crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+               val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+       }
 
        /* LRC Bypass */
        val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
@@ -3520,8 +3606,8 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
                        uint8_t dp_train_pat)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv =
+               to_i915(intel_dig_port->base.base.dev);
        uint8_t buf[sizeof(intel_dp->train_set) + 1];
        int ret, len;
 
@@ -3562,8 +3648,8 @@ intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
                           const uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv =
+               to_i915(intel_dig_port->base.base.dev);
        int ret;
 
        intel_get_adjust_train(intel_dp, link_status);
@@ -3620,19 +3706,23 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
        int voltage_tries, loop_tries;
        uint32_t DP = intel_dp->DP;
        uint8_t link_config[2];
+       uint8_t link_bw, rate_select;
 
        if (HAS_DDI(dev))
                intel_ddi_prepare_link_retrain(encoder);
 
+       intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
+                             &link_bw, &rate_select);
+
        /* Write the link configuration data */
-       link_config[0] = intel_dp->link_bw;
+       link_config[0] = link_bw;
        link_config[1] = intel_dp->lane_count;
        if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
        drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
        if (intel_dp->num_sink_rates)
                drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
-                               &intel_dp->rate_select, 1);
+                                 &rate_select, 1);
 
        link_config[0] = 0;
        link_config[1] = DP_SET_ANSI_8B10B;
@@ -3723,14 +3813,27 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 void
 intel_dp_complete_link_train(struct intel_dp *intel_dp)
 {
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
        bool channel_eq = false;
        int tries, cr_tries;
        uint32_t DP = intel_dp->DP;
        uint32_t training_pattern = DP_TRAINING_PATTERN_2;
 
-       /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/
-       if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
+       /*
+        * Training Pattern 3 for HBR2 or 1.2 devices that support it.
+        *
+        * Intel platforms that support HBR2 also support TPS3. TPS3 support is
+        * also mandatory for downstream devices that support HBR2.
+        *
+        * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
+        * supported but still not enabled.
+        */
+       if (intel_dp_source_supports_hbr2(dev) &&
+           drm_dp_tps3_supported(intel_dp->dpcd))
                training_pattern = DP_TRAINING_PATTERN_3;
+       else if (intel_dp->link_rate == 540000)
+               DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
 
        /* channel equalization */
        if (!intel_dp_set_link_train(intel_dp, &DP,
@@ -3758,7 +3861,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                }
 
                /* Make sure clock is still ok */
-               if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+               if (!drm_dp_clock_recovery_ok(link_status,
+                                             intel_dp->lane_count)) {
                        intel_dp->train_set_valid = false;
                        intel_dp_start_link_train(intel_dp);
                        intel_dp_set_link_train(intel_dp, &DP,
@@ -3768,7 +3872,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                        continue;
                }
 
-               if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
+               if (drm_dp_channel_eq_ok(link_status,
+                                        intel_dp->lane_count)) {
                        channel_eq = true;
                        break;
                }
@@ -3909,19 +4014,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
                }
        }
 
-       /* Training Pattern 3 support, Intel platforms that support HBR2 alone
-        * have support for TP3 hence that check is used along with dpcd check
-        * to ensure TP3 can be enabled.
-        * SKL < B0: due it's WaDisableHBR2 is the only exception where TP3 is
-        * supported but still not enabled.
-        */
-       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
-           intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
-           intel_dp_source_supports_hbr2(dev)) {
-               intel_dp->use_tps3 = true;
-               DRM_DEBUG_KMS("Displayport TPS3 supported\n");
-       } else
-               intel_dp->use_tps3 = false;
+       DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
+                     yesno(intel_dp_source_supports_hbr2(dev)),
+                     yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
 
        /* Intermediate frequency support */
        if (is_edp(intel_dp) &&
@@ -4007,22 +4102,30 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
        return intel_dp->is_mst;
 }
 
-static void intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
+static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
        u8 buf;
+       int ret = 0;
 
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
                DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
-               return;
+               ret = -EIO;
+               goto out;
        }
 
        if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
-                              buf & ~DP_TEST_SINK_START) < 0)
+                              buf & ~DP_TEST_SINK_START) < 0) {
                DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
+               ret = -EIO;
+               goto out;
+       }
 
+       intel_dp->sink_crc.started = false;
+ out:
        hsw_enable_ips(intel_crtc);
+       return ret;
 }
 
 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
@@ -4030,6 +4133,13 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
        u8 buf;
+       int ret;
+
+       if (intel_dp->sink_crc.started) {
+               ret = intel_dp_sink_crc_stop(intel_dp);
+               if (ret)
+                       return ret;
+       }
 
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
                return -EIO;
@@ -4037,6 +4147,8 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
        if (!(buf & DP_TEST_CRC_SUPPORTED))
                return -ENOTTY;
 
+       intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
+
        if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
                return -EIO;
 
@@ -4048,6 +4160,7 @@ static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
                return -EIO;
        }
 
+       intel_dp->sink_crc.started = true;
        return 0;
 }
 
@@ -4057,38 +4170,55 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
        struct drm_device *dev = dig_port->base.base.dev;
        struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
        u8 buf;
-       int test_crc_count;
+       int count, ret;
        int attempts = 6;
-       int ret;
+       bool old_equal_new;
 
        ret = intel_dp_sink_crc_start(intel_dp);
        if (ret)
                return ret;
 
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
-               ret = -EIO;
-               goto stop;
-       }
-
-       test_crc_count = buf & DP_TEST_COUNT_MASK;
-
        do {
+               intel_wait_for_vblank(dev, intel_crtc->pipe);
+
                if (drm_dp_dpcd_readb(&intel_dp->aux,
                                      DP_TEST_SINK_MISC, &buf) < 0) {
                        ret = -EIO;
                        goto stop;
                }
-               intel_wait_for_vblank(dev, intel_crtc->pipe);
-       } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
+               count = buf & DP_TEST_COUNT_MASK;
+
+               /*
+                * Count might be reset during the loop. In this case
+                * last known count needs to be reset as well.
+                */
+               if (count == 0)
+                       intel_dp->sink_crc.last_count = 0;
+
+               if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
+                       ret = -EIO;
+                       goto stop;
+               }
+
+               old_equal_new = (count == intel_dp->sink_crc.last_count &&
+                                !memcmp(intel_dp->sink_crc.last_crc, crc,
+                                        6 * sizeof(u8)));
+
+       } while (--attempts && (count == 0 || old_equal_new));
+
+       intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
+       memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
 
        if (attempts == 0) {
-               DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
-               ret = -ETIMEDOUT;
-               goto stop;
+               if (old_equal_new) {
+                       DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
+               } else {
+                       DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
+                       ret = -ETIMEDOUT;
+                       goto stop;
+               }
        }
 
-       if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
-               ret = -EIO;
 stop:
        intel_dp_sink_crc_stop(intel_dp);
        return ret;
@@ -4248,7 +4378,8 @@ go_again:
                if (bret == true) {
 
                        /* check link status - esi[10] = 0x200c */
-                       if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
+                       if (intel_dp->active_mst_links &&
+                           !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
                                DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
                                intel_dp_start_link_train(intel_dp);
                                intel_dp_complete_link_train(intel_dp);
@@ -4410,58 +4541,164 @@ edp_detect(struct intel_dp *intel_dp)
        return status;
 }
 
-static enum drm_connector_status
-ironlake_dp_detect(struct intel_dp *intel_dp)
+static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+                                      struct intel_digital_port *port)
 {
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       u32 bit;
 
-       if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
-               return connector_status_disconnected;
+       switch (port->port) {
+       case PORT_A:
+               return true;
+       case PORT_B:
+               bit = SDE_PORTB_HOTPLUG;
+               break;
+       case PORT_C:
+               bit = SDE_PORTC_HOTPLUG;
+               break;
+       case PORT_D:
+               bit = SDE_PORTD_HOTPLUG;
+               break;
+       default:
+               MISSING_CASE(port->port);
+               return false;
+       }
 
-       return intel_dp_detect_dpcd(intel_dp);
+       return I915_READ(SDEISR) & bit;
+}
+
+static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
+                                      struct intel_digital_port *port)
+{
+       u32 bit;
+
+       switch (port->port) {
+       case PORT_A:
+               return true;
+       case PORT_B:
+               bit = SDE_PORTB_HOTPLUG_CPT;
+               break;
+       case PORT_C:
+               bit = SDE_PORTC_HOTPLUG_CPT;
+               break;
+       case PORT_D:
+               bit = SDE_PORTD_HOTPLUG_CPT;
+               break;
+       case PORT_E:
+               bit = SDE_PORTE_HOTPLUG_SPT;
+               break;
+       default:
+               MISSING_CASE(port->port);
+               return false;
+       }
+
+       return I915_READ(SDEISR) & bit;
 }
 
-static int g4x_digital_port_connected(struct drm_device *dev,
+static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
+                                      struct intel_digital_port *port)
+{
+       u32 bit;
+
+       switch (port->port) {
+       case PORT_B:
+               bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
+               break;
+       case PORT_C:
+               bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
+               break;
+       case PORT_D:
+               bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
+               break;
+       default:
+               MISSING_CASE(port->port);
+               return false;
+       }
+
+       return I915_READ(PORT_HOTPLUG_STAT) & bit;
+}
+
+static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
+                                      struct intel_digital_port *port)
+{
+       u32 bit;
+
+       switch (port->port) {
+       case PORT_B:
+               bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
+               break;
+       case PORT_C:
+               bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
+               break;
+       case PORT_D:
+               bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
+               break;
+       default:
+               MISSING_CASE(port->port);
+               return false;
+       }
+
+       return I915_READ(PORT_HOTPLUG_STAT) & bit;
+}
+
+static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
                                       struct intel_digital_port *intel_dig_port)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t bit;
+       struct intel_encoder *intel_encoder = &intel_dig_port->base;
+       enum port port;
+       u32 bit;
 
-       if (IS_VALLEYVIEW(dev)) {
-               switch (intel_dig_port->port) {
-               case PORT_B:
-                       bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
-                       break;
-               case PORT_C:
-                       bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
-                       break;
-               case PORT_D:
-                       bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
-                       break;
-               default:
-                       return -EINVAL;
-               }
-       } else {
-               switch (intel_dig_port->port) {
-               case PORT_B:
-                       bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
-                       break;
-               case PORT_C:
-                       bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
-                       break;
-               case PORT_D:
-                       bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
-                       break;
-               default:
-                       return -EINVAL;
-               }
+       intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
+       switch (port) {
+       case PORT_A:
+               bit = BXT_DE_PORT_HP_DDIA;
+               break;
+       case PORT_B:
+               bit = BXT_DE_PORT_HP_DDIB;
+               break;
+       case PORT_C:
+               bit = BXT_DE_PORT_HP_DDIC;
+               break;
+       default:
+               MISSING_CASE(port);
+               return false;
        }
 
-       if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
-               return 0;
-       return 1;
+       return I915_READ(GEN8_DE_PORT_ISR) & bit;
+}
+
+/**
+ * intel_digital_port_connected - is the specified port connected?
+ * @dev_priv: i915 private structure
+ * @port: the port to test
+ *
+ * Return %true if @port is connected, %false otherwise.
+ */
+bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
+                                        struct intel_digital_port *port)
+{
+       if (HAS_PCH_IBX(dev_priv))
+               return ibx_digital_port_connected(dev_priv, port);
+       if (HAS_PCH_SPLIT(dev_priv))
+               return cpt_digital_port_connected(dev_priv, port);
+       else if (IS_BROXTON(dev_priv))
+               return bxt_digital_port_connected(dev_priv, port);
+       else if (IS_VALLEYVIEW(dev_priv))
+               return vlv_digital_port_connected(dev_priv, port);
+       else
+               return g4x_digital_port_connected(dev_priv, port);
+}
+
+static enum drm_connector_status
+ironlake_dp_detect(struct intel_dp *intel_dp)
+{
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+       if (!intel_digital_port_connected(dev_priv, intel_dig_port))
+               return connector_status_disconnected;
+
+       return intel_dp_detect_dpcd(intel_dp);
 }
 
 static enum drm_connector_status
@@ -4469,7 +4706,6 @@ g4x_dp_detect(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       int ret;
 
        /* Can't disconnect eDP, but you can close the lid... */
        if (is_edp(intel_dp)) {
@@ -4481,10 +4717,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
                return status;
        }
 
-       ret = g4x_digital_port_connected(dev, intel_dig_port);
-       if (ret == -EINVAL)
-               return connector_status_unknown;
-       else if (ret == 0)
+       if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
                return connector_status_disconnected;
 
        return intel_dp_detect_dpcd(intel_dp);
@@ -4728,7 +4961,7 @@ intel_dp_set_property(struct drm_connector *connector,
 
        if (property == dev_priv->broadcast_rgb_property) {
                bool old_auto = intel_dp->color_range_auto;
-               uint32_t old_range = intel_dp->color_range;
+               bool old_range = intel_dp->limited_color_range;
 
                switch (val) {
                case INTEL_BROADCAST_RGB_AUTO:
@@ -4736,18 +4969,18 @@ intel_dp_set_property(struct drm_connector *connector,
                        break;
                case INTEL_BROADCAST_RGB_FULL:
                        intel_dp->color_range_auto = false;
-                       intel_dp->color_range = 0;
+                       intel_dp->limited_color_range = false;
                        break;
                case INTEL_BROADCAST_RGB_LIMITED:
                        intel_dp->color_range_auto = false;
-                       intel_dp->color_range = DP_COLOR_RANGE_16_235;
+                       intel_dp->limited_color_range = true;
                        break;
                default:
                        return -EINVAL;
                }
 
                if (old_auto == intel_dp->color_range_auto &&
-                   old_range == intel_dp->color_range)
+                   old_range == intel_dp->limited_color_range)
                        return 0;
 
                goto done;
@@ -4947,13 +5180,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
                /* indicate that we need to restart link training */
                intel_dp->train_set_valid = false;
 
-               if (HAS_PCH_SPLIT(dev)) {
-                       if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
-                               goto mst_fail;
-               } else {
-                       if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
-                               goto mst_fail;
-               }
+               if (!intel_digital_port_connected(dev_priv, intel_dig_port))
+                       goto mst_fail;
 
                if (!intel_dp_get_dpcd(intel_dp)) {
                        goto mst_fail;
@@ -5028,6 +5256,13 @@ bool intel_dp_is_edp(struct drm_device *dev, enum port port)
                [PORT_E] = DVO_PORT_DPE,
        };
 
+       /*
+        * eDP not supported on g4x, so bail out early just
+        * for a bit extra safety in case the VBT is bonkers.
+        */
+       if (INTEL_INFO(dev)->gen < 5)
+               return false;
+
        if (port == PORT_A)
                return true;
 
@@ -5853,6 +6088,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                break;
        case PORT_B:
                intel_encoder->hpd_pin = HPD_PORT_B;
+               if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
+                       intel_encoder->hpd_pin = HPD_PORT_A;
                break;
        case PORT_C:
                intel_encoder->hpd_pin = HPD_PORT_C;
@@ -5953,6 +6190,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
                intel_encoder->pre_enable = chv_pre_enable_dp;
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->post_disable = chv_post_disable_dp;
+               intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
        } else if (IS_VALLEYVIEW(dev)) {
                intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
                intel_encoder->pre_enable = vlv_pre_enable_dp;
index 6ade068884328680ffe024dd91eabb9ffe6d9013..7ada76c5ecc5541e53994b297dd9ce45ce2fe940 100644 (file)
@@ -39,7 +39,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct drm_atomic_state *state;
        int bpp, i;
-       int lane_count, slots, rate;
+       int lane_count, slots;
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        struct drm_connector *drm_connector;
        struct intel_connector *connector, *found = NULL;
@@ -56,20 +56,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
         */
        lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
 
-       rate = intel_dp_max_link_rate(intel_dp);
 
-       if (intel_dp->num_sink_rates) {
-               intel_dp->link_bw = 0;
-               intel_dp->rate_select = intel_dp_rate_select(intel_dp, rate);
-       } else {
-               intel_dp->link_bw = drm_dp_link_rate_to_bw_code(rate);
-               intel_dp->rate_select = 0;
-       }
-
-       intel_dp->lane_count = lane_count;
+       pipe_config->lane_count = lane_count;
 
        pipe_config->pipe_bpp = 24;
-       pipe_config->port_clock = rate;
+       pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
 
        state = pipe_config->base.state;
 
@@ -184,6 +175,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
        if (intel_dp->active_mst_links == 0) {
                enum port port = intel_ddi_get_encoder_port(encoder);
 
+               intel_dp_set_link_params(intel_dp, intel_crtc->config);
+
                /* FIXME: add support for SKL */
                if (INTEL_INFO(dev)->gen < 9)
                        I915_WRITE(PORT_CLK_SEL(port),
@@ -286,6 +279,10 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
                break;
        }
        pipe_config->base.adjusted_mode.flags |= flags;
+
+       pipe_config->lane_count =
+               ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+
        intel_dp_get_m_n(crtc, pipe_config);
 
        intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
index 2b9e6f9775c5314511a577e82965bb79c8be788f..c96289dba380328a01bfb1d9d6b58302891488a1 100644 (file)
@@ -142,6 +142,7 @@ struct intel_encoder {
        void (*mode_set)(struct intel_encoder *intel_encoder);
        void (*disable)(struct intel_encoder *);
        void (*post_disable)(struct intel_encoder *);
+       void (*post_pll_disable)(struct intel_encoder *);
        /* Read out the current hw state of this connector, returning true if
         * the encoder is active. If the encoder is enabled it also set the pipe
         * it is connected to in the pipe parameter. */
@@ -337,6 +338,8 @@ struct intel_crtc_state {
 #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS      (1<<0) /* unreliable sync mode.flags */
        unsigned long quirks;
 
+       bool update_pipe;
+
        /* Pipe source size (ie. panel fitter input size)
         * All planes will be positioned inside this space,
         * and get clipped at the edges. */
@@ -423,6 +426,8 @@ struct intel_crtc_state {
        /* Used by SDVO (and if we ever fix it, HDMI). */
        unsigned pixel_multiplier;
 
+       uint8_t lane_count;
+
        /* Panel fitter controls for gen2-gen4 + VLV */
        struct {
                u32 control;
@@ -532,6 +537,8 @@ struct intel_crtc {
         * gen4+ this only adjusts up to a tile, offsets within a tile are
         * handled in the hw itself (with the TILEOFF register). */
        unsigned long dspaddr_offset;
+       int adjusted_x;
+       int adjusted_y;
 
        struct drm_i915_gem_object *cursor_bo;
        uint32_t cursor_addr;
@@ -560,7 +567,13 @@ struct intel_crtc {
 
        int scanline_offset;
 
-       unsigned start_vbl_count;
+       struct {
+               unsigned start_vbl_count;
+               ktime_t start_vbl_time;
+               int min_vbl, max_vbl;
+               int scanline_start;
+       } debug;
+
        struct intel_crtc_atomic_commit atomic;
 
        /* scalers available on this crtc */
@@ -657,13 +670,14 @@ struct cxsr_latency {
 struct intel_hdmi {
        u32 hdmi_reg;
        int ddc_bus;
-       uint32_t color_range;
+       bool limited_color_range;
        bool color_range_auto;
        bool has_hdmi_sink;
        bool has_audio;
        enum hdmi_force_audio force_audio;
        bool rgb_quant_range_selectable;
        enum hdmi_picture_aspect aspect_ratio;
+       struct intel_connector *attached_connector;
        void (*write_infoframe)(struct drm_encoder *encoder,
                                enum hdmi_infoframe_type type,
                                const void *frame, ssize_t len);
@@ -696,23 +710,29 @@ enum link_m_n_set {
        M2_N2
 };
 
+struct sink_crc {
+       bool started;
+       u8 last_crc[6];
+       int last_count;
+};
+
 struct intel_dp {
        uint32_t output_reg;
        uint32_t aux_ch_ctl_reg;
        uint32_t DP;
+       int link_rate;
+       uint8_t lane_count;
        bool has_audio;
        enum hdmi_force_audio force_audio;
-       uint32_t color_range;
+       bool limited_color_range;
        bool color_range_auto;
-       uint8_t link_bw;
-       uint8_t rate_select;
-       uint8_t lane_count;
        uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
        uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
        uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
        /* sink rates as reported by DP_SUPPORTED_LINK_RATES */
        uint8_t num_sink_rates;
        int sink_rates[DP_MAX_SUPPORTED_RATES];
+       struct sink_crc sink_crc;
        struct drm_dp_aux aux;
        uint8_t train_set[4];
        int panel_power_up_delay;
@@ -735,7 +755,6 @@ struct intel_dp {
        enum pipe pps_pipe;
        struct edp_power_seq pps_delays;
 
-       bool use_tps3;
        bool can_mst; /* this port supports mst */
        bool is_mst;
        int active_mst_links;
@@ -770,6 +789,7 @@ struct intel_digital_port {
        struct intel_dp dp;
        struct intel_hdmi hdmi;
        enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
+       bool release_cl2_override;
 };
 
 struct intel_dp_mst_encoder {
@@ -779,7 +799,7 @@ struct intel_dp_mst_encoder {
        void *port; /* store this opaque as its illegal to dereference it */
 };
 
-static inline int
+static inline enum dpio_channel
 vlv_dport_to_channel(struct intel_digital_port *dport)
 {
        switch (dport->port) {
@@ -793,7 +813,21 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
        }
 }
 
-static inline int
+static inline enum dpio_phy
+vlv_dport_to_phy(struct intel_digital_port *dport)
+{
+       switch (dport->port) {
+       case PORT_B:
+       case PORT_C:
+               return DPIO_PHY0;
+       case PORT_D:
+               return DPIO_PHY1;
+       default:
+               BUG();
+       }
+}
+
+static inline enum dpio_channel
 vlv_pipe_to_channel(enum pipe pipe)
 {
        switch (pipe) {
@@ -834,8 +868,8 @@ struct intel_unpin_work {
        u32 flip_count;
        u32 gtt_offset;
        struct drm_i915_gem_request *flip_queued_req;
-       int flip_queued_vblank;
-       int flip_ready_vblank;
+       u32 flip_queued_vblank;
+       u32 flip_ready_vblank;
        bool enable_stall_check;
 };
 
@@ -987,6 +1021,7 @@ void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
 extern const struct drm_plane_funcs intel_plane_funcs;
 bool intel_has_pending_fb_unpin(struct drm_device *dev);
 int intel_pch_rawclk(struct drm_device *dev);
+int intel_hrawclk(struct drm_device *dev);
 void intel_mark_busy(struct drm_device *dev);
 void intel_mark_idle(struct drm_device *dev);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
@@ -995,8 +1030,6 @@ void intel_encoder_destroy(struct drm_encoder *encoder);
 int intel_connector_init(struct intel_connector *);
 struct intel_connector *intel_connector_alloc(void);
 bool intel_connector_get_hw_state(struct intel_connector *connector);
-bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
-                               struct intel_digital_port *port);
 void intel_connector_attach_encoder(struct intel_connector *connector,
                                    struct intel_encoder *encoder);
 struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -1038,10 +1071,8 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe);
 void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
 void intel_check_page_flip(struct drm_device *dev, int pipe);
 int intel_prepare_plane_fb(struct drm_plane *plane,
-                          struct drm_framebuffer *fb,
                           const struct drm_plane_state *new_state);
 void intel_cleanup_plane_fb(struct drm_plane *plane,
-                           struct drm_framebuffer *fb,
                            const struct drm_plane_state *old_state);
 int intel_plane_atomic_get_property(struct drm_plane *plane,
                                    const struct drm_plane_state *state,
@@ -1056,7 +1087,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
 
 unsigned int
 intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
-                 uint64_t fb_format_modifier);
+                 uint64_t fb_format_modifier, unsigned int plane);
 
 static inline bool
 intel_rotation_90_or_270(unsigned int rotation)
@@ -1137,7 +1168,9 @@ int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
 
 unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
-                                    struct drm_i915_gem_object *obj);
+                                    struct drm_i915_gem_object *obj,
+                                    unsigned int plane);
+
 u32 skl_plane_ctl_format(uint32_t pixel_format);
 u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
 u32 skl_plane_ctl_rotation(unsigned int rotation);
@@ -1155,6 +1188,8 @@ void assert_csr_loaded(struct drm_i915_private *dev_priv);
 void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
 bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                             struct intel_connector *intel_connector);
+void intel_dp_set_link_params(struct intel_dp *intel_dp,
+                             const struct intel_crtc_state *pipe_config);
 void intel_dp_start_link_train(struct intel_dp *intel_dp);
 void intel_dp_complete_link_train(struct intel_dp *intel_dp);
 void intel_dp_stop_link_train(struct intel_dp *intel_dp);
@@ -1185,6 +1220,8 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp);
 void intel_edp_drrs_invalidate(struct drm_device *dev,
                unsigned frontbuffer_bits);
 void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
+bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
+                                        struct intel_digital_port *port);
 void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
 
 /* intel_dp_mst.c */
@@ -1339,6 +1376,12 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
 
 void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
 
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+                            bool override, unsigned int mask);
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+                         enum dpio_channel ch, bool override);
+
+
 /* intel_pm.c */
 void intel_init_clock_gating(struct drm_device *dev);
 void intel_suspend_hw(struct drm_device *dev);
@@ -1384,9 +1427,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
 int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
 int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
-void intel_pipe_update_start(struct intel_crtc *crtc,
-                            uint32_t *start_vbl_count);
-void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
+void intel_pipe_update_start(struct intel_crtc *crtc);
+void intel_pipe_update_end(struct intel_crtc *crtc);
 
 /* intel_tv.c */
 void intel_tv_init(struct drm_device *dev);
index 32a6c7184ca4fcbcc73786e678f634a22453224d..61d69c214508e68fe61a349d3bd4a60c132bffb7 100644 (file)
@@ -557,7 +557,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
                usleep_range(2000, 2500);
        }
 
-       vlv_disable_dsi_pll(encoder);
+       intel_disable_dsi_pll(encoder);
 }
 
 static void intel_dsi_post_disable(struct intel_encoder *encoder)
@@ -654,6 +654,7 @@ intel_dsi_mode_valid(struct drm_connector *connector,
 {
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+       int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
        DRM_DEBUG_KMS("\n");
 
@@ -667,6 +668,8 @@ intel_dsi_mode_valid(struct drm_connector *connector,
                        return MODE_PANEL;
                if (mode->vdisplay > fixed_mode->vdisplay)
                        return MODE_PANEL;
+               if (fixed_mode->clock > max_dotclk)
+                       return MODE_CLOCK_HIGH;
        }
 
        return MODE_OK;
@@ -734,6 +737,21 @@ static void set_dsi_timings(struct drm_encoder *encoder,
        hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
 
        for_each_dsi_port(port, intel_dsi->ports) {
+               if (IS_BROXTON(dev)) {
+                       /*
+                        * Program hdisplay and vdisplay on MIPI transcoder.
+                        * This is different from calculated hactive and
+                        * vactive, as they are calculated per channel basis,
+                        * whereas these values should be based on resolution.
+                        */
+                       I915_WRITE(BXT_MIPI_TRANS_HACTIVE(port),
+                                       mode->hdisplay);
+                       I915_WRITE(BXT_MIPI_TRANS_VACTIVE(port),
+                                       mode->vdisplay);
+                       I915_WRITE(BXT_MIPI_TRANS_VTOTAL(port),
+                                       mode->vtotal);
+               }
+
                I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive);
                I915_WRITE(MIPI_HFP_COUNT(port), hfp);
 
@@ -774,16 +792,39 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
        }
 
        for_each_dsi_port(port, intel_dsi->ports) {
-               /* escape clock divider, 20MHz, shared for A and C.
-                * device ready must be off when doing this! txclkesc? */
-               tmp = I915_READ(MIPI_CTRL(PORT_A));
-               tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
-               I915_WRITE(MIPI_CTRL(PORT_A), tmp | ESCAPE_CLOCK_DIVIDER_1);
-
-               /* read request priority is per pipe */
-               tmp = I915_READ(MIPI_CTRL(port));
-               tmp &= ~READ_REQUEST_PRIORITY_MASK;
-               I915_WRITE(MIPI_CTRL(port), tmp | READ_REQUEST_PRIORITY_HIGH);
+               if (IS_VALLEYVIEW(dev)) {
+                       /*
+                        * escape clock divider, 20MHz, shared for A and C.
+                        * device ready must be off when doing this! txclkesc?
+                        */
+                       tmp = I915_READ(MIPI_CTRL(PORT_A));
+                       tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+                       I915_WRITE(MIPI_CTRL(PORT_A), tmp |
+                                       ESCAPE_CLOCK_DIVIDER_1);
+
+                       /* read request priority is per pipe */
+                       tmp = I915_READ(MIPI_CTRL(port));
+                       tmp &= ~READ_REQUEST_PRIORITY_MASK;
+                       I915_WRITE(MIPI_CTRL(port), tmp |
+                                       READ_REQUEST_PRIORITY_HIGH);
+               } else if (IS_BROXTON(dev)) {
+                       /*
+                        * FIXME:
+                        * BXT can connect any PIPE to any MIPI port.
+                        * Select the pipe based on the MIPI port read from
+                        * VBT for now. Pick PIPE A for MIPI port A and C
+                        * for port C.
+                        */
+                       tmp = I915_READ(MIPI_CTRL(port));
+                       tmp &= ~BXT_PIPE_SELECT_MASK;
+
+                       if (port == PORT_A)
+                               tmp |= BXT_PIPE_SELECT_A;
+                       else if (port == PORT_C)
+                               tmp |= BXT_PIPE_SELECT_C;
+
+                       I915_WRITE(MIPI_CTRL(port), tmp);
+               }
 
                /* XXX: why here, why like this? handling in irq handler?! */
                I915_WRITE(MIPI_INTR_STAT(port), 0xffffffff);
@@ -860,6 +901,17 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
                I915_WRITE(MIPI_INIT_COUNT(port),
                                txclkesc(intel_dsi->escape_clk_div, 100));
 
+               if (IS_BROXTON(dev) && (!intel_dsi->dual_link)) {
+                       /*
+                        * BXT spec says write MIPI_INIT_COUNT for
+                        * both the ports, even if only one is
+                        * getting used. So write the other port
+                        * if not in dual link mode.
+                        */
+                       I915_WRITE(MIPI_INIT_COUNT(port ==
+                                               PORT_A ? PORT_C : PORT_A),
+                                       intel_dsi->init_count);
+               }
 
                /* recovery disables */
                I915_WRITE(MIPI_EOT_DISABLE(port), tmp);
@@ -911,8 +963,8 @@ static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
        DRM_DEBUG_KMS("\n");
 
        intel_dsi_prepare(encoder);
+       intel_enable_dsi_pll(encoder);
 
-       vlv_enable_dsi_pll(encoder);
 }
 
 static enum drm_connector_status
index 42a68593e32aac97b8a0c43dcae13c9a5fcafcb7..5cc46b4131f72450e785b9766720d81d3c2c5900 100644 (file)
@@ -124,8 +124,8 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
        return container_of(encoder, struct intel_dsi, base.base);
 }
 
-extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
-extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
+extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
+extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
 extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
 
 struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
index c6a8975b128f123da9ebae9b7236ae37e79d85c0..f335e6cd443141af3743fe22ccb3a59efd6dfdcb 100644 (file)
@@ -246,7 +246,7 @@ static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
        vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
 }
 
-void vlv_enable_dsi_pll(struct intel_encoder *encoder)
+static void vlv_enable_dsi_pll(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
        u32 tmp;
@@ -276,7 +276,7 @@ void vlv_enable_dsi_pll(struct intel_encoder *encoder)
        DRM_DEBUG_KMS("DSI PLL locked\n");
 }
 
-void vlv_disable_dsi_pll(struct intel_encoder *encoder)
+static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
        u32 tmp;
@@ -293,6 +293,26 @@ void vlv_disable_dsi_pll(struct intel_encoder *encoder)
        mutex_unlock(&dev_priv->sb_lock);
 }
 
+static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       u32 val;
+
+       DRM_DEBUG_KMS("\n");
+
+       val = I915_READ(BXT_DSI_PLL_ENABLE);
+       val &= ~BXT_DSI_PLL_DO_ENABLE;
+       I915_WRITE(BXT_DSI_PLL_ENABLE, val);
+
+       /*
+        * PLL lock should deassert within 200us.
+        * Wait up to 1ms before timing out.
+        */
+       if (wait_for((I915_READ(BXT_DSI_PLL_ENABLE)
+                                       & BXT_DSI_PLL_LOCKED) == 0, 1))
+               DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
+}
+
 static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
 {
        int bpp = dsi_pixel_format_bpp(pixel_format);
@@ -363,3 +383,106 @@ u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
 
        return pclk;
 }
+
+static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       u8 dsi_ratio;
+       u32 dsi_clk;
+       u32 val;
+
+       dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
+                       intel_dsi->lane_count);
+
+       /*
+        * From clock diagram, to get PLL ratio divider, divide double of DSI
+        * link rate (i.e., 2*8x=16x frequency value) by ref clock. Make sure to
+        * round 'up' the result
+        */
+       dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);
+       if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
+                       dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
+               DRM_ERROR("Cant get a suitable ratio from DSI PLL ratios\n");
+               return false;
+       }
+
+       /*
+        * Program DSI ratio and Select MIPIC and MIPIA PLL output as 8x
+        * Spec says both have to be programmed, even if one is not getting
+        * used. Configure MIPI_CLOCK_CTL dividers in modeset
+        */
+       val = I915_READ(BXT_DSI_PLL_CTL);
+       val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
+       val &= ~BXT_DSI_FREQ_SEL_MASK;
+       val &= ~BXT_DSI_PLL_RATIO_MASK;
+       val |= (dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2);
+
+       /* As per recommendation from hardware team,
+        * Prog PVD ratio =1 if dsi ratio <= 50
+        */
+       if (dsi_ratio <= 50) {
+               val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
+               val |= BXT_DSI_PLL_PVD_RATIO_1;
+       }
+
+       I915_WRITE(BXT_DSI_PLL_CTL, val);
+       POSTING_READ(BXT_DSI_PLL_CTL);
+
+       return true;
+}
+
+static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       u32 val;
+
+       DRM_DEBUG_KMS("\n");
+
+       val = I915_READ(BXT_DSI_PLL_ENABLE);
+
+       if (val & BXT_DSI_PLL_DO_ENABLE) {
+               WARN(1, "DSI PLL already enabled. Disabling it.\n");
+               val &= ~BXT_DSI_PLL_DO_ENABLE;
+               I915_WRITE(BXT_DSI_PLL_ENABLE, val);
+       }
+
+       /* Configure PLL values */
+       if (!bxt_configure_dsi_pll(encoder)) {
+               DRM_ERROR("Configure DSI PLL failed, abort PLL enable\n");
+               return;
+       }
+
+       /* Enable DSI PLL */
+       val = I915_READ(BXT_DSI_PLL_ENABLE);
+       val |= BXT_DSI_PLL_DO_ENABLE;
+       I915_WRITE(BXT_DSI_PLL_ENABLE, val);
+
+       /* Timeout and fail if PLL not locked */
+       if (wait_for(I915_READ(BXT_DSI_PLL_ENABLE) & BXT_DSI_PLL_LOCKED, 1)) {
+               DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
+               return;
+       }
+
+       DRM_DEBUG_KMS("DSI PLL locked\n");
+}
+
+void intel_enable_dsi_pll(struct intel_encoder *encoder)
+{
+       struct drm_device *dev = encoder->base.dev;
+
+       if (IS_VALLEYVIEW(dev))
+               vlv_enable_dsi_pll(encoder);
+       else if (IS_BROXTON(dev))
+               bxt_enable_dsi_pll(encoder);
+}
+
+void intel_disable_dsi_pll(struct intel_encoder *encoder)
+{
+       struct drm_device *dev = encoder->base.dev;
+
+       if (IS_VALLEYVIEW(dev))
+               vlv_disable_dsi_pll(encoder);
+       else if (IS_BROXTON(dev))
+               bxt_disable_dsi_pll(encoder);
+}
index dc532bb61d229834dafeaf7d3ba54f50042763ea..c80fe1f49ede63d067b75ab109402a6c64edd6fb 100644 (file)
@@ -201,6 +201,8 @@ intel_dvo_mode_valid(struct drm_connector *connector,
                     struct drm_display_mode *mode)
 {
        struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+       int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+       int target_clock = mode->clock;
 
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;
@@ -212,8 +214,13 @@ intel_dvo_mode_valid(struct drm_connector *connector,
                        return MODE_PANEL;
                if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay)
                        return MODE_PANEL;
+
+               target_clock = intel_dvo->panel_fixed_mode->clock;
        }
 
+       if (target_clock > max_dotclk)
+               return MODE_CLOCK_HIGH;
+
        return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
 }
 
index 1f97fb548c2ac6b937e2b5e8f0a7a29c9592a9b2..6777fbb25d07d341d7a16e07bea4e5dfa36d0b1f 100644 (file)
 #include "intel_drv.h"
 #include "i915_drv.h"
 
+/*
+ * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
+ * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
+ * origin so the x and y offsets can actually fit the registers. As a
+ * consequence, the fence doesn't really start exactly at the display plane
+ * address we program because it starts at the real start of the buffer, so we
+ * have to take this into consideration here.
+ */
+static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
+{
+       return crtc->base.y - crtc->adjusted_y;
+}
+
 static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
 {
        u32 fbc_ctl;
@@ -88,7 +101,7 @@ static void i8xx_fbc_enable(struct intel_crtc *crtc)
 
        /* Clear old tags */
        for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
-               I915_WRITE(FBC_TAG + (i * 4), 0);
+               I915_WRITE(FBC_TAG(i), 0);
 
        if (IS_GEN4(dev_priv)) {
                u32 fbc_ctl2;
@@ -97,7 +110,7 @@ static void i8xx_fbc_enable(struct intel_crtc *crtc)
                fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
                fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
                I915_WRITE(FBC_CONTROL2, fbc_ctl2);
-               I915_WRITE(FBC_FENCE_OFF, crtc->base.y);
+               I915_WRITE(FBC_FENCE_OFF, get_crtc_fence_y_offset(crtc));
        }
 
        /* enable it... */
@@ -135,7 +148,7 @@ static void g4x_fbc_enable(struct intel_crtc *crtc)
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
        dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
 
-       I915_WRITE(DPFC_FENCE_YOFF, crtc->base.y);
+       I915_WRITE(DPFC_FENCE_YOFF, get_crtc_fence_y_offset(crtc));
 
        /* enable it... */
        I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -177,6 +190,7 @@ static void ilk_fbc_enable(struct intel_crtc *crtc)
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 dpfc_ctl;
        int threshold = dev_priv->fbc.threshold;
+       unsigned int y_offset;
 
        dev_priv->fbc.enabled = true;
 
@@ -200,7 +214,8 @@ static void ilk_fbc_enable(struct intel_crtc *crtc)
        if (IS_GEN5(dev_priv))
                dpfc_ctl |= obj->fence_reg;
 
-       I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->base.y);
+       y_offset = get_crtc_fence_y_offset(crtc);
+       I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset);
        I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -208,7 +223,7 @@ static void ilk_fbc_enable(struct intel_crtc *crtc)
        if (IS_GEN6(dev_priv)) {
                I915_WRITE(SNB_DPFC_CTL_SA,
                           SNB_CPU_FENCE_ENABLE | obj->fence_reg);
-               I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);
+               I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
        }
 
        intel_fbc_nuke(dev_priv);
@@ -272,23 +287,23 @@ static void gen7_fbc_enable(struct intel_crtc *crtc)
        if (dev_priv->fbc.false_color)
                dpfc_ctl |= FBC_CTL_FALSE_COLOR;
 
-       I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
        if (IS_IVYBRIDGE(dev_priv)) {
                /* WaFbcAsynchFlipDisableFbcQueue:ivb */
                I915_WRITE(ILK_DISPLAY_CHICKEN1,
                           I915_READ(ILK_DISPLAY_CHICKEN1) |
                           ILK_FBCQ_DIS);
-       } else {
+       } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
                I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
                           I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
                           HSW_FBCQ_DIS);
        }
 
+       I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+
        I915_WRITE(SNB_DPFC_CTL_SA,
                   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
-       I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y);
+       I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));
 
        intel_fbc_nuke(dev_priv);
 
@@ -308,6 +323,18 @@ bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
        return dev_priv->fbc.enabled;
 }
 
+static void intel_fbc_enable(struct intel_crtc *crtc,
+                            const struct drm_framebuffer *fb)
+{
+       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+       dev_priv->fbc.enable_fbc(crtc);
+
+       dev_priv->fbc.crtc = crtc;
+       dev_priv->fbc.fb_id = fb->base.id;
+       dev_priv->fbc.y = crtc->base.y;
+}
+
 static void intel_fbc_work_fn(struct work_struct *__work)
 {
        struct intel_fbc_work *work =
@@ -321,13 +348,8 @@ static void intel_fbc_work_fn(struct work_struct *__work)
                /* Double check that we haven't switched fb without cancelling
                 * the prior work.
                 */
-               if (crtc_fb == work->fb) {
-                       dev_priv->fbc.enable_fbc(work->crtc);
-
-                       dev_priv->fbc.crtc = work->crtc;
-                       dev_priv->fbc.fb_id = crtc_fb->base.id;
-                       dev_priv->fbc.y = work->crtc->base.y;
-               }
+               if (crtc_fb == work->fb)
+                       intel_fbc_enable(work->crtc, work->fb);
 
                dev_priv->fbc.fbc_work = NULL;
        }
@@ -361,7 +383,7 @@ static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
        dev_priv->fbc.fbc_work = NULL;
 }
 
-static void intel_fbc_enable(struct intel_crtc *crtc)
+static void intel_fbc_schedule_enable(struct intel_crtc *crtc)
 {
        struct intel_fbc_work *work;
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
@@ -373,7 +395,7 @@ static void intel_fbc_enable(struct intel_crtc *crtc)
        work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL) {
                DRM_ERROR("Failed to allocate FBC work structure\n");
-               dev_priv->fbc.enable_fbc(crtc);
+               intel_fbc_enable(crtc, crtc->base.primary->fb);
                return;
        }
 
@@ -473,6 +495,12 @@ const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
                return "rotation unsupported";
        case FBC_IN_DBG_MASTER:
                return "Kernel debugger is active";
+       case FBC_BAD_STRIDE:
+               return "framebuffer stride not supported";
+       case FBC_PIXEL_RATE:
+               return "pixel rate is too big";
+       case FBC_PIXEL_FORMAT:
+               return "pixel format is invalid";
        default:
                MISSING_CASE(reason);
                return "unknown reason";
@@ -542,6 +570,16 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
 {
        int compression_threshold = 1;
        int ret;
+       u64 end;
+
+       /* The FBC hardware for BDW/SKL doesn't have access to the stolen
+        * reserved range size, so it always assumes the maximum (8mb) is used.
+        * If we enable FBC using a CFB on that memory range we'll get FIFO
+        * underruns, even if that range is not reserved by the BIOS. */
+       if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+               end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
+       else
+               end = dev_priv->gtt.stolen_usable_size;
 
        /* HACK: This code depends on what we will do in *_enable_fbc. If that
         * code changes, this code needs to change as well.
@@ -551,7 +589,8 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
         */
 
        /* Try to over-allocate to reduce reallocations and fragmentation. */
-       ret = i915_gem_stolen_insert_node(dev_priv, node, size <<= 1, 4096);
+       ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
+                                                  4096, 0, end);
        if (ret == 0)
                return compression_threshold;
 
@@ -561,7 +600,8 @@ again:
            (fb_cpp == 2 && compression_threshold == 2))
                return 0;
 
-       ret = i915_gem_stolen_insert_node(dev_priv, node, size >>= 1, 4096);
+       ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
+                                                  4096, 0, end);
        if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
                return 0;
        } else if (ret) {
@@ -613,8 +653,9 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
 
        dev_priv->fbc.uncompressed_size = size;
 
-       DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
-                     size);
+       DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
+                     dev_priv->fbc.compressed_fb.size,
+                     dev_priv->fbc.threshold);
 
        return 0;
 
@@ -664,6 +705,50 @@ static int intel_fbc_setup_cfb(struct drm_i915_private *dev_priv, int size,
        return intel_fbc_alloc_cfb(dev_priv, size, fb_cpp);
 }
 
+static bool stride_is_valid(struct drm_i915_private *dev_priv,
+                           unsigned int stride)
+{
+       /* These should have been caught earlier. */
+       WARN_ON(stride < 512);
+       WARN_ON((stride & (64 - 1)) != 0);
+
+       /* Below are the additional FBC restrictions. */
+
+       if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
+               return stride == 4096 || stride == 8192;
+
+       if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
+               return false;
+
+       if (stride > 16384)
+               return false;
+
+       return true;
+}
+
+static bool pixel_format_is_valid(struct drm_framebuffer *fb)
+{
+       struct drm_device *dev = fb->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
+               return true;
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_RGB565:
+               /* 16bpp not supported on gen2 */
+               if (IS_GEN2(dev))
+                       return false;
+               /* WaFbcOnly1to1Ratio:ctg */
+               if (IS_G4X(dev_priv))
+                       return false;
+               return true;
+       default:
+               return false;
+       }
+}
+
 /**
  * __intel_fbc_update - enable/disable FBC as needed, unlocked
  * @dev_priv: i915 device instance
@@ -774,12 +859,30 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
                goto out_disable;
        }
 
+       if (!stride_is_valid(dev_priv, fb->pitches[0])) {
+               set_no_fbc_reason(dev_priv, FBC_BAD_STRIDE);
+               goto out_disable;
+       }
+
+       if (!pixel_format_is_valid(fb)) {
+               set_no_fbc_reason(dev_priv, FBC_PIXEL_FORMAT);
+               goto out_disable;
+       }
+
        /* If the kernel debugger is active, always disable compression */
        if (in_dbg_master()) {
                set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
                goto out_disable;
        }
 
+       /* WaFbcExceedCdClockThreshold:hsw,bdw */
+       if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
+           ilk_pipe_pixel_rate(intel_crtc->config) >=
+           dev_priv->cdclk_freq * 95 / 100) {
+               set_no_fbc_reason(dev_priv, FBC_PIXEL_RATE);
+               goto out_disable;
+       }
+
        if (intel_fbc_setup_cfb(dev_priv, obj->base.size,
                                drm_format_plane_cpp(fb->pixel_format, 0))) {
                set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
@@ -824,7 +927,7 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
                __intel_fbc_disable(dev_priv);
        }
 
-       intel_fbc_enable(intel_crtc);
+       intel_fbc_schedule_enable(intel_crtc);
        dev_priv->fbc.no_fbc_reason = FBC_OK;
        return;
 
index 8c6a6fa460057d38c71fe8f01986cde39f234442..65329127f0b9a2eed2ff29178c4fbd2265840aa5 100644 (file)
@@ -263,7 +263,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
-       DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
+       DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx, bo %p\n",
                      fb->width, fb->height,
                      i915_gem_obj_ggtt_offset(obj), obj);
 
@@ -541,16 +541,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
        struct intel_crtc *intel_crtc;
        unsigned int max_size = 0;
 
-       if (!i915.fastboot)
-               return false;
-
        /* Find the largest fb */
        for_each_crtc(dev, crtc) {
                struct drm_i915_gem_object *obj =
                        intel_fb_obj(crtc->primary->state->fb);
                intel_crtc = to_intel_crtc(crtc);
 
-               if (!intel_crtc->active || !obj) {
+               if (!crtc->state->active || !obj) {
                        DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
                                      pipe_name(intel_crtc->pipe));
                        continue;
@@ -575,7 +572,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
 
                intel_crtc = to_intel_crtc(crtc);
 
-               if (!intel_crtc->active) {
+               if (!crtc->state->active) {
                        DRM_DEBUG_KMS("pipe %c not active, skipping\n",
                                      pipe_name(intel_crtc->pipe));
                        continue;
@@ -638,7 +635,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
        for_each_crtc(dev, crtc) {
                intel_crtc = to_intel_crtc(crtc);
 
-               if (!intel_crtc->active)
+               if (!crtc->state->active)
                        continue;
 
                WARN(!crtc->primary->fb,
@@ -689,6 +686,8 @@ int intel_fbdev_init(struct drm_device *dev)
                return ret;
        }
 
+       ifbdev->helper.atomic = true;
+
        dev_priv->fbdev = ifbdev;
        INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
 
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
new file mode 100644 (file)
index 0000000..4ec2d27
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+ * Copyright Â© 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#ifndef _INTEL_GUC_H_
+#define _INTEL_GUC_H_
+
+#include "intel_guc_fwif.h"
+#include "i915_guc_reg.h"
+
+struct i915_guc_client {
+       struct drm_i915_gem_object *client_obj;
+       struct intel_context *owner;
+       struct intel_guc *guc;
+       uint32_t priority;
+       uint32_t ctx_index;
+
+       uint32_t proc_desc_offset;
+       uint32_t doorbell_offset;
+       uint32_t cookie;
+       uint16_t doorbell_id;
+       uint16_t padding;               /* Maintain alignment           */
+
+       uint32_t wq_offset;
+       uint32_t wq_size;
+
+       spinlock_t wq_lock;             /* Protects all data below      */
+       uint32_t wq_tail;
+
+       /* GuC submission statistics & status */
+       uint64_t submissions[I915_NUM_RINGS];
+       uint32_t q_fail;
+       uint32_t b_fail;
+       int retcode;
+};
+
+enum intel_guc_fw_status {
+       GUC_FIRMWARE_FAIL = -1,
+       GUC_FIRMWARE_NONE = 0,
+       GUC_FIRMWARE_PENDING,
+       GUC_FIRMWARE_SUCCESS
+};
+
+/*
+ * This structure encapsulates all the data needed during the process
+ * of fetching, caching, and loading the firmware image into the GuC.
+ */
+struct intel_guc_fw {
+       struct drm_device *             guc_dev;
+       const char *                    guc_fw_path;
+       size_t                          guc_fw_size;
+       struct drm_i915_gem_object *    guc_fw_obj;
+       enum intel_guc_fw_status        guc_fw_fetch_status;
+       enum intel_guc_fw_status        guc_fw_load_status;
+
+       uint16_t                        guc_fw_major_wanted;
+       uint16_t                        guc_fw_minor_wanted;
+       uint16_t                        guc_fw_major_found;
+       uint16_t                        guc_fw_minor_found;
+};
+
+struct intel_guc {
+       struct intel_guc_fw guc_fw;
+
+       uint32_t log_flags;
+       struct drm_i915_gem_object *log_obj;
+
+       struct drm_i915_gem_object *ctx_pool_obj;
+       struct ida ctx_ids;
+
+       struct i915_guc_client *execbuf_client;
+
+       spinlock_t host2guc_lock;       /* Protects all data below      */
+
+       DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS);
+       uint32_t db_cacheline;          /* Cyclic counter mod pagesize  */
+
+       /* Action status & statistics */
+       uint64_t action_count;          /* Total commands issued        */
+       uint32_t action_cmd;            /* Last command word            */
+       uint32_t action_status;         /* Last return status           */
+       uint32_t action_fail;           /* Total number of failures     */
+       int32_t action_err;             /* Last error code              */
+
+       uint64_t submissions[I915_NUM_RINGS];
+       uint32_t last_seqno[I915_NUM_RINGS];
+};
+
+/* intel_guc_loader.c */
+extern void intel_guc_ucode_init(struct drm_device *dev);
+extern int intel_guc_ucode_load(struct drm_device *dev);
+extern void intel_guc_ucode_fini(struct drm_device *dev);
+extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
+
+/* i915_guc_submission.c */
+int i915_guc_submission_init(struct drm_device *dev);
+int i915_guc_submission_enable(struct drm_device *dev);
+int i915_guc_submit(struct i915_guc_client *client,
+                   struct drm_i915_gem_request *rq);
+void i915_guc_submission_disable(struct drm_device *dev);
+void i915_guc_submission_fini(struct drm_device *dev);
+
+#endif
index 18d7f20936c8e28b1d057a48ea7f4eb8caab8b88..e1f47ba2b4b0017732b1b281746d3d396a56c3d0 100644 (file)
  * EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST.
  */
 
-#define GFXCORE_FAMILY_GEN8            11
 #define GFXCORE_FAMILY_GEN9            12
-#define GFXCORE_FAMILY_FORCE_ULONG     0x7fffffff
+#define GFXCORE_FAMILY_UNKNOWN         0x7fffffff
 
-#define GUC_CTX_PRIORITY_CRITICAL      0
+#define GUC_CTX_PRIORITY_KMD_HIGH      0
 #define GUC_CTX_PRIORITY_HIGH          1
-#define GUC_CTX_PRIORITY_NORMAL                2
-#define GUC_CTX_PRIORITY_LOW           3
+#define GUC_CTX_PRIORITY_KMD_NORMAL    2
+#define GUC_CTX_PRIORITY_NORMAL                3
 
 #define GUC_MAX_GPU_CONTEXTS           1024
-#define        GUC_INVALID_CTX_ID              (GUC_MAX_GPU_CONTEXTS + 1)
+#define        GUC_INVALID_CTX_ID              GUC_MAX_GPU_CONTEXTS
 
 /* Work queue item header definitions */
 #define WQ_STATUS_ACTIVE               1
@@ -76,6 +75,7 @@
 #define GUC_CTX_DESC_ATTR_RESET                (1 << 4)
 #define GUC_CTX_DESC_ATTR_WQLOCKED     (1 << 5)
 #define GUC_CTX_DESC_ATTR_PCH          (1 << 6)
+#define GUC_CTX_DESC_ATTR_TERMINATED   (1 << 7)
 
 /* The guc control data is 10 DWORDs */
 #define GUC_CTL_CTXINFO                        0
 #define   GUC_CTL_DISABLE_SCHEDULER    (1 << 4)
 #define   GUC_CTL_PREEMPTION_LOG       (1 << 5)
 #define   GUC_CTL_ENABLE_SLPC          (1 << 7)
+#define   GUC_CTL_RESET_ON_PREMPT_FAILURE      (1 << 8)
 #define GUC_CTL_DEBUG                  8
 #define   GUC_LOG_VERBOSITY_SHIFT      0
 #define   GUC_LOG_VERBOSITY_LOW                (0 << GUC_LOG_VERBOSITY_SHIFT)
 /* Verbosity range-check limits, without the shift */
 #define          GUC_LOG_VERBOSITY_MIN         0
 #define          GUC_LOG_VERBOSITY_MAX         3
+#define GUC_CTL_RSRVD                  9
 
-#define GUC_CTL_MAX_DWORDS             (GUC_CTL_DEBUG + 1)
+#define GUC_CTL_MAX_DWORDS             (GUC_CTL_RSRVD + 1)
 
 struct guc_doorbell_info {
        u32 db_status;
@@ -208,7 +210,9 @@ struct guc_context_desc {
 
        u32 engine_presence;
 
-       u32 reserved0[1];
+       u8 engine_suspended;
+
+       u8 reserved0[3];
        u64 reserved1[1];
 
        u64 desc_private;
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
new file mode 100644 (file)
index 0000000..e0601cc
--- /dev/null
@@ -0,0 +1,613 @@
+/*
+ * Copyright Â© 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Vinit Azad <vinit.azad@intel.com>
+ *    Ben Widawsky <ben@bwidawsk.net>
+ *    Dave Gordon <david.s.gordon@intel.com>
+ *    Alex Dai <yu.dai@intel.com>
+ */
+#include <linux/firmware.h>
+#include "i915_drv.h"
+#include "intel_guc.h"
+
+/**
+ * DOC: GuC
+ *
+ * intel_guc:
+ * Top level structure of guc. It handles firmware loading and manages client
+ * pool and doorbells. intel_guc owns a i915_guc_client to replace the legacy
+ * ExecList submission.
+ *
+ * Firmware versioning:
+ * The firmware build process will generate a version header file with major and
+ * minor version defined. The versions are built into CSS header of firmware.
+ * i915 kernel driver set the minimal firmware version required per platform.
+ * The firmware installation package will install (symbolic link) proper version
+ * of firmware.
+ *
+ * GuC address space:
+ * GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
+ * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
+ * 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
+ * used by GuC is pinned with PIN_OFFSET_BIAS along with size of WOPCM.
+ *
+ * Firmware log:
+ * Firmware log is enabled by setting i915.guc_log_level to non-negative level.
+ * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from
+ * i915_guc_load_status will print out firmware loading status and scratch
+ * registers value.
+ *
+ */
+
+#define I915_SKL_GUC_UCODE "i915/skl_guc_ver4.bin"
+MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
+
+/* User-friendly representation of an enum */
+const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
+{
+       switch (status) {
+       case GUC_FIRMWARE_FAIL:
+               return "FAIL";
+       case GUC_FIRMWARE_NONE:
+               return "NONE";
+       case GUC_FIRMWARE_PENDING:
+               return "PENDING";
+       case GUC_FIRMWARE_SUCCESS:
+               return "SUCCESS";
+       default:
+               return "UNKNOWN!";
+       }
+};
+
+static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
+{
+       struct intel_engine_cs *ring;
+       int i, irqs;
+
+       /* tell all command streamers NOT to forward interrupts and vblank to GuC */
+       irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
+       irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
+       for_each_ring(ring, dev_priv, i)
+               I915_WRITE(RING_MODE_GEN7(ring), irqs);
+
+       /* tell DE to send nothing to GuC */
+       I915_WRITE(DE_GUCRMR, ~0);
+
+       /* route all GT interrupts to the host */
+       I915_WRITE(GUC_BCS_RCS_IER, 0);
+       I915_WRITE(GUC_VCS2_VCS1_IER, 0);
+       I915_WRITE(GUC_WD_VECS_IER, 0);
+}
+
+static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
+{
+       struct intel_engine_cs *ring;
+       int i, irqs;
+
+       /* tell all command streamers to forward interrupts and vblank to GuC */
+       irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
+       irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
+       for_each_ring(ring, dev_priv, i)
+               I915_WRITE(RING_MODE_GEN7(ring), irqs);
+
+       /* tell DE to send (all) flip_done to GuC */
+       irqs = DERRMR_PIPEA_PRI_FLIP_DONE | DERRMR_PIPEA_SPR_FLIP_DONE |
+              DERRMR_PIPEB_PRI_FLIP_DONE | DERRMR_PIPEB_SPR_FLIP_DONE |
+              DERRMR_PIPEC_PRI_FLIP_DONE | DERRMR_PIPEC_SPR_FLIP_DONE;
+       /* Unmasked bits will cause GuC response message to be sent */
+       I915_WRITE(DE_GUCRMR, ~irqs);
+
+       /* route USER_INTERRUPT to Host, all others are sent to GuC. */
+       irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+              GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+       /* These three registers have the same bit definitions */
+       I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
+       I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
+       I915_WRITE(GUC_WD_VECS_IER, ~irqs);
+}
+
+static u32 get_gttype(struct drm_i915_private *dev_priv)
+{
+       /* XXX: GT type based on PCI device ID? field seems unused by fw */
+       return 0;
+}
+
+static u32 get_core_family(struct drm_i915_private *dev_priv)
+{
+       switch (INTEL_INFO(dev_priv)->gen) {
+       case 9:
+               return GFXCORE_FAMILY_GEN9;
+
+       default:
+               DRM_ERROR("GUC: unsupported core family\n");
+               return GFXCORE_FAMILY_UNKNOWN;
+       }
+}
+
+static void set_guc_init_params(struct drm_i915_private *dev_priv)
+{
+       struct intel_guc *guc = &dev_priv->guc;
+       u32 params[GUC_CTL_MAX_DWORDS];
+       int i;
+
+       memset(&params, 0, sizeof(params));
+
+       params[GUC_CTL_DEVICE_INFO] |=
+               (get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) |
+               (get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT);
+
+       /*
+        * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
+        * second. This ARAR is calculated by:
+        * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
+        */
+       params[GUC_CTL_ARAT_HIGH] = 0;
+       params[GUC_CTL_ARAT_LOW] = 100000000;
+
+       params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
+
+       params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
+                       GUC_CTL_VCS2_ENABLED;
+
+       if (i915.guc_log_level >= 0) {
+               params[GUC_CTL_LOG_PARAMS] = guc->log_flags;
+               params[GUC_CTL_DEBUG] =
+                       i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
+       }
+
+       /* If GuC submission is enabled, set up additional parameters here */
+       if (i915.enable_guc_submission) {
+               u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
+               u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
+
+               pgs >>= PAGE_SHIFT;
+               params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
+                       (ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
+
+               params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
+
+               /* Unmask this bit to enable the GuC's internal scheduler */
+               params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
+       }
+
+       I915_WRITE(SOFT_SCRATCH(0), 0);
+
+       for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
+               I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
+}
+
+/*
+ * Read the GuC status register (GUC_STATUS) and store it in the
+ * specified location; then return a boolean indicating whether
+ * the value matches either of two values representing completion
+ * of the GuC boot process.
+ *
+ * This is used for polling the GuC status in a wait_for_atomic()
+ * loop below.
+ */
+static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
+                                     u32 *status)
+{
+       u32 val = I915_READ(GUC_STATUS);
+       *status = val;
+       return ((val & GS_UKERNEL_MASK) == GS_UKERNEL_READY ||
+               (val & GS_UKERNEL_MASK) == GS_UKERNEL_LAPIC_DONE);
+}
+
+/*
+ * Transfer the firmware image to RAM for execution by the microcontroller.
+ *
+ * GuC Firmware layout:
+ * +-------------------------------+  ----
+ * |          CSS header           |  128B
+ * | contains major/minor version  |
+ * +-------------------------------+  ----
+ * |             uCode             |
+ * +-------------------------------+  ----
+ * |         RSA signature         |  256B
+ * +-------------------------------+  ----
+ *
+ * Architecturally, the DMA engine is bidirectional, and can potentially even
+ * transfer between GTT locations. This functionality is left out of the API
+ * for now as there is no need for it.
+ *
+ * Note that GuC needs the CSS header plus uKernel code to be copied by the
+ * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
+ */
+
+#define UOS_CSS_HEADER_OFFSET          0
+#define UOS_VER_MINOR_OFFSET           0x44
+#define UOS_VER_MAJOR_OFFSET           0x46
+#define UOS_CSS_HEADER_SIZE            0x80
+#define UOS_RSA_SIG_SIZE               0x100
+
+static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
+{
+       struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+       struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj;
+       unsigned long offset;
+       struct sg_table *sg = fw_obj->pages;
+       u32 status, ucode_size, rsa[UOS_RSA_SIG_SIZE / sizeof(u32)];
+       int i, ret = 0;
+
+       /* uCode size, also is where RSA signature starts */
+       offset = ucode_size = guc_fw->guc_fw_size - UOS_RSA_SIG_SIZE;
+       I915_WRITE(DMA_COPY_SIZE, ucode_size);
+
+       /* Copy RSA signature from the fw image to HW for verification */
+       sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, UOS_RSA_SIG_SIZE, offset);
+       for (i = 0; i < UOS_RSA_SIG_SIZE / sizeof(u32); i++)
+               I915_WRITE(UOS_RSA_SCRATCH_0 + i * sizeof(u32), rsa[i]);
+
+       /* Set the source address for the new blob */
+       offset = i915_gem_obj_ggtt_offset(fw_obj);
+       I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
+       I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
+
+       /*
+        * Set the DMA destination. Current uCode expects the code to be
+        * loaded at 8k; locations below this are used for the stack.
+        */
+       I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
+       I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+       /* Finally start the DMA */
+       I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
+
+       /*
+        * Spin-wait for the DMA to complete & the GuC to start up.
+        * NB: Docs recommend not using the interrupt for completion.
+        * Measurements indicate this should take no more than 20ms, so a
+        * timeout here indicates that the GuC has failed and is unusable.
+        * (Higher levels of the driver will attempt to fall back to
+        * execlist mode if this happens.)
+        */
+       ret = wait_for_atomic(guc_ucode_response(dev_priv, &status), 100);
+
+       DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
+                       I915_READ(DMA_CTRL), status);
+
+       if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
+               DRM_ERROR("GuC firmware signature verification failed\n");
+               ret = -ENOEXEC;
+       }
+
+       DRM_DEBUG_DRIVER("returning %d\n", ret);
+
+       return ret;
+}
+
+/*
+ * Load the GuC firmware blob into the MinuteIA.
+ *
+ * Prepares the previously-fetched firmware GEM object for DMA (GTT
+ * domain + GGTT pin), programs the WOPCM, shim, doorbell and
+ * clock-gating registers, then hands off to guc_ucode_xfer_dma() for
+ * the actual transfer.  Forcewake is held across all the register
+ * writes and the DMA.  The object is unpinned (but its pages are kept)
+ * afterwards.
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
+{
+       struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+       struct drm_device *dev = dev_priv->dev;
+       int ret;
+
+       ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
+       if (ret) {
+               DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
+               return ret;
+       }
+
+       /* Pin into the global GTT so the GuC DMA engine can see it */
+       ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0);
+       if (ret) {
+               DRM_DEBUG_DRIVER("pin failed %d\n", ret);
+               return ret;
+       }
+
+       /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
+       I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+       /* init WOPCM */
+       I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE);
+       I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
+
+       /* Enable MIA caching. GuC clock gating is disabled. */
+       I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
+
+       /* WaDisableMinuteIaClockGating:skl,bxt */
+       if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
+           (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) {
+               I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
+                                             ~GUC_ENABLE_MIA_CLOCK_GATING));
+       }
+
+       /* WaC6DisallowByGfxPause */
+       I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
+
+       if (IS_BROXTON(dev))
+               I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+       else
+               I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+
+       if (IS_GEN9(dev)) {
+               /* DOP Clock Gating Enable for GuC clocks */
+               I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
+                                           I915_READ(GEN7_MISCCPCTL)));
+
+               /* allows for 5us before GT can go to RC6 */
+               I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
+       }
+
+       set_guc_init_params(dev_priv);
+
+       ret = guc_ucode_xfer_dma(dev_priv);
+
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+       /*
+        * We keep the object pages for reuse during resume. But we can unpin it
+        * now that DMA has completed, so it doesn't continue to take up space.
+        */
+       i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj);
+
+       return ret;
+}
+
+/**
+ * intel_guc_ucode_load() - load GuC uCode into the device
+ * @dev:       drm device
+ *
+ * Called from gem_init_hw() during driver loading and also after a GPU reset.
+ *
+ * The firmware image should have already been fetched into memory by the
+ * earlier call to intel_guc_ucode_init(), so here we need only check that
+ * it succeeded, and then transfer the image to the h/w.
+ *
+ * Return:     non-zero code on error
+ */
+int intel_guc_ucode_load(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+       int err = 0;
+
+       DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
+               intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
+               intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+
+       /*
+        * Route GuC interrupts back to the host and tear down any existing
+        * submission state first, so the early returns below always leave
+        * the GPU in plain (non-GuC) submission mode.
+        */
+       direct_interrupts_to_host(dev_priv);
+       i915_guc_submission_disable(dev);
+
+       /* No firmware was requested for this platform: nothing to load */
+       if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE)
+               return 0;
+
+       /* A previous load of this (successfully fetched) image failed;
+        * don't retry with the same image. */
+       if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS &&
+           guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL)
+               return -ENOEXEC;
+
+       guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
+
+       DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
+               intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
+
+       switch (guc_fw->guc_fw_fetch_status) {
+       case GUC_FIRMWARE_FAIL:
+               /* something went wrong :( */
+               err = -EIO;
+               goto fail;
+
+       case GUC_FIRMWARE_NONE:
+       case GUC_FIRMWARE_PENDING:
+       default:
+               /* "can't happen": NONE and PENDING were filtered out above */
+               WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
+                       guc_fw->guc_fw_path,
+                       intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
+                       guc_fw->guc_fw_fetch_status);
+               err = -ENXIO;
+               goto fail;
+
+       case GUC_FIRMWARE_SUCCESS:
+               break;
+       }
+
+       err = i915_guc_submission_init(dev);
+       if (err)
+               goto fail;
+
+       err = guc_ucode_xfer(dev_priv);
+       if (err)
+               goto fail;
+
+       guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
+
+       DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
+               intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
+               intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+
+       /* Only enable GuC submission if the module parameter asks for it */
+       if (i915.enable_guc_submission) {
+               err = i915_guc_submission_enable(dev);
+               if (err)
+                       goto fail;
+               direct_interrupts_to_guc(dev_priv);
+       }
+
+       return 0;
+
+fail:
+       if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
+               guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
+
+       /* Fall back to host interrupts / non-GuC submission on any error */
+       direct_interrupts_to_host(dev_priv);
+       i915_guc_submission_disable(dev);
+
+       return err;
+}
+
+/*
+ * Fetch the GuC firmware image from the filesystem into a GEM object.
+ *
+ * Requests the blob named by guc_fw->guc_fw_path, validates its size
+ * and embedded major/minor version against the "wanted" version, then
+ * copies it into a new GEM object recorded in guc_fw->guc_fw_obj.
+ * Sets guc_fw_fetch_status to SUCCESS or FAIL accordingly; on failure
+ * any partially-created object is released.
+ */
+static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
+{
+       struct drm_i915_gem_object *obj;
+       const struct firmware *fw;
+       const u8 *css_header;
+       const size_t minsize = UOS_CSS_HEADER_SIZE + UOS_RSA_SIG_SIZE;
+       const size_t maxsize = GUC_WOPCM_SIZE_VALUE + UOS_RSA_SIG_SIZE
+                       - 0x8000; /* 32k reserved (8K stack + 24k context) */
+       int err;
+
+       DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
+               intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
+
+       err = request_firmware(&fw, guc_fw->guc_fw_path, &dev->pdev->dev);
+       if (err)
+               goto fail;
+       if (!fw)
+               goto fail;
+
+       DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
+               guc_fw->guc_fw_path, fw);
+       DRM_DEBUG_DRIVER("firmware file size %zu (minimum %zu, maximum %zu)\n",
+               fw->size, minsize, maxsize);
+
+       /*
+        * Check the size of the blob before examining buffer contents.
+        * NOTE(review): err is still 0 on this failure path (and on the
+        * !fw path above), so the fail message below reports "error 0";
+        * consider setting err = -ENOEXEC before these gotos.
+        */
+       if (fw->size < minsize || fw->size > maxsize)
+               goto fail;
+
+       /*
+        * The GuC firmware image has the version number embedded at a well-known
+        * offset within the firmware blob; note that major / minor version are
+        * TWO bytes each (i.e. u16), although all pointers and offsets are defined
+        * in terms of bytes (u8).
+        * NOTE(review): these are raw u16 loads from the blob; assumes the
+        * CSS header version offsets are 2-byte aligned -- confirm for all
+        * supported architectures.
+        */
+       css_header = fw->data + UOS_CSS_HEADER_OFFSET;
+       guc_fw->guc_fw_major_found = *(u16 *)(css_header + UOS_VER_MAJOR_OFFSET);
+       guc_fw->guc_fw_minor_found = *(u16 *)(css_header + UOS_VER_MINOR_OFFSET);
+
+       /* Major must match exactly; minor must be at least the wanted one */
+       if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
+           guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
+               DRM_ERROR("GuC firmware version %d.%d, required %d.%d\n",
+                       guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
+                       guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
+               err = -ENOEXEC;
+               goto fail;
+       }
+
+       DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
+                       guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
+                       guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
+
+       /* Copy the blob into a GEM object; the fw buffer itself is freed below */
+       obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
+       if (IS_ERR_OR_NULL(obj)) {
+               err = obj ? PTR_ERR(obj) : -ENOMEM;
+               goto fail;
+       }
+
+       guc_fw->guc_fw_obj = obj;
+       guc_fw->guc_fw_size = fw->size;
+
+       DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n",
+                       guc_fw->guc_fw_obj);
+
+       release_firmware(fw);
+       guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_SUCCESS;
+       return;
+
+fail:
+       DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
+               err, fw, guc_fw->guc_fw_obj);
+       DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
+                 guc_fw->guc_fw_path, err);
+
+       /* Drop any partially-created GEM object */
+       obj = guc_fw->guc_fw_obj;
+       if (obj)
+               drm_gem_object_unreference(&obj->base);
+       guc_fw->guc_fw_obj = NULL;
+
+       release_firmware(fw);           /* OK even if fw is NULL */
+       guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
+}
+
+/**
+ * intel_guc_ucode_init() - define parameters and fetch firmware
+ * @dev:       drm device
+ *
+ * Called early during driver load, but after GEM is initialised.
+ * The device struct_mutex must be held by the caller, as we're
+ * going to allocate a GEM object to hold the firmware image.
+ *
+ * The firmware will be transferred to the GuC's memory later,
+ * when intel_guc_ucode_load() is called.
+ */
+void intel_guc_ucode_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+       const char *fw_path;
+
+       /* GuC submission is only meaningful if the h/w supports it */
+       if (!HAS_GUC_SCHED(dev))
+               i915.enable_guc_submission = false;
+
+       /*
+        * fw_path == NULL means no GuC uCode on this device (silently
+        * skipped below); fw_path == "" means a GuC-capable device with
+        * no known firmware (reported as an error below).
+        */
+       if (!HAS_GUC_UCODE(dev)) {
+               fw_path = NULL;
+       } else if (IS_SKYLAKE(dev)) {
+               fw_path = I915_SKL_GUC_UCODE;
+               guc_fw->guc_fw_major_wanted = 4;
+               guc_fw->guc_fw_minor_wanted = 3;
+       } else {
+               i915.enable_guc_submission = false;
+               fw_path = "";   /* unknown device */
+       }
+
+       guc_fw->guc_dev = dev;
+       guc_fw->guc_fw_path = fw_path;
+       guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
+       guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
+
+       /* No GuC on this device: leave fetch status as NONE */
+       if (fw_path == NULL)
+               return;
+
+       if (*fw_path == '\0') {
+               DRM_ERROR("No GuC firmware known for this platform\n");
+               guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
+               return;
+       }
+
+       guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
+       DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
+       guc_fw_fetch(dev, guc_fw);
+       /* status must now be FAIL or SUCCESS */
+}
+
+/**
+ * intel_guc_ucode_fini() - clean up all allocated resources
+ * @dev:       drm device
+ *
+ * Routes interrupts back to the host, tears down GuC submission state,
+ * releases the firmware GEM object and resets the fetch status to NONE.
+ */
+void intel_guc_ucode_fini(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+
+       direct_interrupts_to_host(dev_priv);
+       i915_guc_submission_fini(dev);
+
+       /* Drop the reference taken when the firmware object was created */
+       if (guc_fw->guc_fw_obj)
+               drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
+       guc_fw->guc_fw_obj = NULL;
+
+       guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
+}
index dcd336bcdfe750037901f87a6fb5e2c78c57b17c..bb33c66b0b121ef67cfceedaf1aa062b9d32cd86 100644 (file)
@@ -848,8 +848,8 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
        u32 hdmi_val;
 
        hdmi_val = SDVO_ENCODING_HDMI;
-       if (!HAS_PCH_SPLIT(dev))
-               hdmi_val |= intel_hdmi->color_range;
+       if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
+               hdmi_val |= HDMI_COLOR_RANGE_16_235;
        if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
        if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1260,11 +1260,12 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 
        if (intel_hdmi->color_range_auto) {
                /* See CEA-861-E - 5.1 Default Encoding Parameters */
-               if (pipe_config->has_hdmi_sink &&
-                   drm_match_cea_mode(adjusted_mode) > 1)
-                       intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
-               else
-                       intel_hdmi->color_range = 0;
+               pipe_config->limited_color_range =
+                       pipe_config->has_hdmi_sink &&
+                       drm_match_cea_mode(adjusted_mode) > 1;
+       } else {
+               pipe_config->limited_color_range =
+                       intel_hdmi->limited_color_range;
        }
 
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
@@ -1273,9 +1274,6 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
                clock_12bpc *= 2;
        }
 
-       if (intel_hdmi->color_range)
-               pipe_config->limited_color_range = true;
-
        if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
                pipe_config->has_pch_encoder = true;
 
@@ -1331,22 +1329,23 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
 }
 
 static bool
-intel_hdmi_set_edid(struct drm_connector *connector)
+intel_hdmi_set_edid(struct drm_connector *connector, bool force)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
        struct intel_encoder *intel_encoder =
                &hdmi_to_dig_port(intel_hdmi)->base;
        enum intel_display_power_domain power_domain;
-       struct edid *edid;
+       struct edid *edid = NULL;
        bool connected = false;
 
        power_domain = intel_display_port_power_domain(intel_encoder);
        intel_display_power_get(dev_priv, power_domain);
 
-       edid = drm_get_edid(connector,
-                           intel_gmbus_get_adapter(dev_priv,
-                                                   intel_hdmi->ddc_bus));
+       if (force)
+               edid = drm_get_edid(connector,
+                                   intel_gmbus_get_adapter(dev_priv,
+                                   intel_hdmi->ddc_bus));
 
        intel_display_power_put(dev_priv, power_domain);
 
@@ -1374,13 +1373,26 @@ static enum drm_connector_status
 intel_hdmi_detect(struct drm_connector *connector, bool force)
 {
        enum drm_connector_status status;
+       struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
+       bool live_status = false;
+       unsigned int retry = 3;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.id, connector->name);
 
+       while (!live_status && --retry) {
+               live_status = intel_digital_port_connected(dev_priv,
+                               hdmi_to_dig_port(intel_hdmi));
+               mdelay(10);
+       }
+
+       if (!live_status)
+               DRM_DEBUG_KMS("Live status not up!");
+
        intel_hdmi_unset_edid(connector);
 
-       if (intel_hdmi_set_edid(connector)) {
+       if (intel_hdmi_set_edid(connector, live_status)) {
                struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 
                hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
@@ -1404,7 +1416,7 @@ intel_hdmi_force(struct drm_connector *connector)
        if (connector->status != connector_status_connected)
                return;
 
-       intel_hdmi_set_edid(connector);
+       intel_hdmi_set_edid(connector, true);
        hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
 }
 
@@ -1470,7 +1482,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
 
        if (property == dev_priv->broadcast_rgb_property) {
                bool old_auto = intel_hdmi->color_range_auto;
-               uint32_t old_range = intel_hdmi->color_range;
+               bool old_range = intel_hdmi->limited_color_range;
 
                switch (val) {
                case INTEL_BROADCAST_RGB_AUTO:
@@ -1478,18 +1490,18 @@ intel_hdmi_set_property(struct drm_connector *connector,
                        break;
                case INTEL_BROADCAST_RGB_FULL:
                        intel_hdmi->color_range_auto = false;
-                       intel_hdmi->color_range = 0;
+                       intel_hdmi->limited_color_range = false;
                        break;
                case INTEL_BROADCAST_RGB_LIMITED:
                        intel_hdmi->color_range_auto = false;
-                       intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
+                       intel_hdmi->limited_color_range = true;
                        break;
                default:
                        return -EINVAL;
                }
 
                if (old_auto == intel_hdmi->color_range_auto &&
-                   old_range == intel_hdmi->color_range)
+                   old_range == intel_hdmi->limited_color_range)
                        return 0;
 
                goto done;
@@ -1617,6 +1629,50 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
        mutex_unlock(&dev_priv->sb_lock);
 }
 
+/*
+ * Assert (reset == true) or deassert the CHV DPIO data lane soft reset
+ * for the encoder's channel: lane reset bits in PCS_DW0 and the clock
+ * soft reset (with request-enable) in PCS_DW1, for the first lane pair
+ * always and for lanes 2/3 only when more than two lanes are in use.
+ * Callers in this file invoke it with dev_priv->sb_lock already held
+ * (required for vlv_dpio_read/write).
+ */
+static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+                                    bool reset)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+       enum pipe pipe = crtc->pipe;
+       uint32_t val;
+
+       /* Lane 0/1 reset bits */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+       if (reset)
+               val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+       else
+               val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+       /* Lane 2/3 reset bits, only when those lanes are in use */
+       if (crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+               if (reset)
+                       val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+               else
+                       val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+       }
+
+       /* Clock soft reset for lanes 0/1 */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+       val |= CHV_PCS_REQ_SOFTRESET_EN;
+       if (reset)
+               val &= ~DPIO_PCS_CLK_SOFT_RESET;
+       else
+               val |= DPIO_PCS_CLK_SOFT_RESET;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+       /* Clock soft reset for lanes 2/3, only when in use */
+       if (crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+               val |= CHV_PCS_REQ_SOFTRESET_EN;
+               if (reset)
+                       val &= ~DPIO_PCS_CLK_SOFT_RESET;
+               else
+                       val |= DPIO_PCS_CLK_SOFT_RESET;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+       }
+}
+
 static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
 {
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
@@ -1630,8 +1686,21 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
 
        intel_hdmi_prepare(encoder);
 
+       /*
+        * Must trick the second common lane into life.
+        * Otherwise we can't even access the PLL.
+        */
+       if (ch == DPIO_CH0 && pipe == PIPE_B)
+               dport->release_cl2_override =
+                       !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
+
+       chv_phy_powergate_lanes(encoder, true, 0x0);
+
        mutex_lock(&dev_priv->sb_lock);
 
+       /* Assert data lane reset */
+       chv_data_lane_soft_reset(encoder, true);
+
        /* program left/right clock distribution */
        if (pipe != PIPE_B) {
                val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
@@ -1683,6 +1752,39 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
        mutex_unlock(&dev_priv->sb_lock);
 }
 
+/*
+ * Post-PLL-disable hook for CHV HDMI: turn off the left/right clock
+ * buffer distribution for this pipe and release the data lane
+ * powergating overrides taken at pre_pll_enable time.
+ */
+static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+       u32 val;
+
+       mutex_lock(&dev_priv->sb_lock);
+
+       /* disable left/right clock distribution */
+       if (pipe != PIPE_B) {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+               val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+       } else {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+               val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+       }
+
+       mutex_unlock(&dev_priv->sb_lock);
+
+       /*
+        * Leave the power down bit cleared for at least one
+        * lane so that chv_powergate_phy_ch() will power
+        * on something when the channel is otherwise unused.
+        * When the port is off and the override is removed
+        * the lanes power down anyway, so otherwise it doesn't
+        * really matter what the state of power down bits is
+        * after this.
+        */
+       chv_phy_powergate_lanes(encoder, false, 0x0);
+}
+
 static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
 {
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
@@ -1701,33 +1803,13 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
 
 static void chv_hdmi_post_disable(struct intel_encoder *encoder)
 {
-       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc =
-               to_intel_crtc(encoder->base.crtc);
-       enum dpio_channel ch = vlv_dport_to_channel(dport);
-       enum pipe pipe = intel_crtc->pipe;
-       u32 val;
 
        mutex_lock(&dev_priv->sb_lock);
 
-       /* Propagate soft reset to data lane reset */
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
-       val |= CHV_PCS_REQ_SOFTRESET_EN;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
-       val |= CHV_PCS_REQ_SOFTRESET_EN;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
-       val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
-       val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+       /* Assert data lane reset */
+       chv_data_lane_soft_reset(encoder, true);
 
        mutex_unlock(&dev_priv->sb_lock);
 }
@@ -1758,23 +1840,6 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
        val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
        vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
 
-       /* Deassert soft data lane reset*/
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
-       val |= CHV_PCS_REQ_SOFTRESET_EN;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
-       val |= CHV_PCS_REQ_SOFTRESET_EN;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
-       val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
-       val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
-
        /* Program Tx latency optimal setting */
        for (i = 0; i < 4; i++) {
                /* Set the upar bit */
@@ -1817,6 +1882,9 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
                       DPIO_TX1_STAGGER_MULT(7) |
                       DPIO_TX2_STAGGER_MULT(5));
 
+       /* Deassert data lane reset */
+       chv_data_lane_soft_reset(encoder, false);
+
        /* Clear calc init */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
        val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
@@ -1851,31 +1919,33 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
 
        for (i = 0; i < 4; i++) {
                val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+
                val &= ~DPIO_SWING_MARGIN000_MASK;
                val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
+
+               /*
+                * Supposedly this value shouldn't matter when unique transition
+                * scale is disabled, but in fact it does matter. Let's just
+                * always program the same value and hope it's OK.
+                */
+               val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+               val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
+
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
        }
 
-       /* Disable unique transition scale */
+       /*
+        * The document said it needs to set bit 27 for ch0 and bit 26
+        * for ch1. Might be a typo in the doc.
+        * For now, for this unique transition scale selection, set bit
+        * 27 for ch0 and ch1.
+        */
        for (i = 0; i < 4; i++) {
                val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
                val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
                vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
        }
 
-       /* Additional steps for 1200mV-0dB */
-#if 0
-       val = vlv_dpio_read(dev_priv, pipe, VLV_TX_DW3(ch));
-       if (ch)
-               val |= DPIO_TX_UNIQ_TRANS_SCALE_CH1;
-       else
-               val |= DPIO_TX_UNIQ_TRANS_SCALE_CH0;
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(ch), val);
-
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(ch),
-                       vlv_dpio_read(dev_priv, pipe, VLV_TX_DW2(ch)) |
-                               (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT));
-#endif
        /* Start swing calculation */
        val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
        val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
@@ -1899,6 +1969,12 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
        g4x_enable_hdmi(encoder);
 
        vlv_wait_port_ready(dev_priv, dport, 0x0);
+
+       /* Second common lane will stay alive on its own now */
+       if (dport->release_cl2_override) {
+               chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
+               dport->release_cl2_override = false;
+       }
 }
 
 static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -1974,7 +2050,14 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
                        intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
                else
                        intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
-               intel_encoder->hpd_pin = HPD_PORT_B;
+               /*
+                * On BXT A0/A1, sw needs to activate DDIA HPD logic and
+                * interrupts to check the external panel connection.
+                */
+               if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
+                       intel_encoder->hpd_pin = HPD_PORT_A;
+               else
+                       intel_encoder->hpd_pin = HPD_PORT_B;
                break;
        case PORT_C:
                if (IS_BROXTON(dev_priv))
@@ -2051,6 +2134,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 
        intel_connector_attach_encoder(intel_connector, intel_encoder);
        drm_connector_register(connector);
+       intel_hdmi->attached_connector = intel_connector;
 
        /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
         * 0xd.  Failure to do so will result in spurious interrupts being
@@ -2097,6 +2181,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
                intel_encoder->pre_enable = chv_hdmi_pre_enable;
                intel_encoder->enable = vlv_enable_hdmi;
                intel_encoder->post_disable = chv_hdmi_post_disable;
+               intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable;
        } else if (IS_VALLEYVIEW(dev)) {
                intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
                intel_encoder->pre_enable = vlv_hdmi_pre_enable;
index 7412caedcf7f98a2a5e494c41e2bad97f34d4e34..825fa7a8df86ec1aeaa7287c0cc88f1a4ecc0f89 100644 (file)
        reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
 }
 
+#define ASSIGN_CTX_PML4(ppgtt, reg_state) { \
+       reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
+       reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
+}
+
 enum {
        ADVANCED_CONTEXT = 0,
-       LEGACY_CONTEXT,
+       LEGACY_32B_CONTEXT,
        ADVANCED_AD_CONTEXT,
        LEGACY_64B_CONTEXT
 };
-#define GEN8_CTX_MODE_SHIFT 3
+#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
+#define GEN8_CTX_ADDRESSING_MODE(dev)  (USES_FULL_48BIT_PPGTT(dev) ?\
+               LEGACY_64B_CONTEXT :\
+               LEGACY_32B_CONTEXT)
 enum {
        FAULT_AND_HANG = 0,
        FAULT_AND_HALT, /* Debug only */
@@ -213,6 +221,9 @@ enum {
 #define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT  0x17
 
 static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
+static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
+               struct drm_i915_gem_object *default_ctx_obj);
+
 
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -228,6 +239,12 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
 {
        WARN_ON(i915.enable_ppgtt == -1);
 
+       /* On platforms with execlist available, vGPU will only
+        * support execlist mode, no ring buffer mode.
+        */
+       if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
+               return 1;
+
        if (INTEL_INFO(dev)->gen >= 9)
                return 1;
 
@@ -255,25 +272,35 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
  */
 u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
 {
-       u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj);
+       u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
+                       LRC_PPHWSP_PN * PAGE_SIZE;
 
        /* LRCA is required to be 4K aligned so the more significant 20 bits
         * are globally unique */
        return lrca >> 12;
 }
 
-static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq)
+static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
 {
-       struct intel_engine_cs *ring = rq->ring;
        struct drm_device *dev = ring->dev;
-       struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+
+       return ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
+               (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) &&
+              (ring->id == VCS || ring->id == VCS2);
+}
+
+uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
+                                    struct intel_engine_cs *ring)
+{
+       struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
        uint64_t desc;
-       uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
+       uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
+                       LRC_PPHWSP_PN * PAGE_SIZE;
 
        WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
 
        desc = GEN8_CTX_VALID;
-       desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
+       desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
        if (IS_GEN8(ctx_obj->base.dev))
                desc |= GEN8_CTX_L3LLC_COHERENT;
        desc |= GEN8_CTX_PRIVILEGE;
@@ -285,10 +312,8 @@ static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq)
        /* desc |= GEN8_CTX_FORCE_RESTORE; */
 
        /* WaEnableForceRestoreInCtxtDescForVCS:skl */
-       if (IS_GEN9(dev) &&
-           INTEL_REVID(dev) <= SKL_REVID_B0 &&
-           (ring->id == BCS || ring->id == VCS ||
-           ring->id == VECS || ring->id == VCS2))
+       /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
+       if (disable_lite_restore_wa(ring))
                desc |= GEN8_CTX_FORCE_RESTORE;
 
        return desc;
@@ -304,13 +329,13 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
        uint64_t desc[2];
 
        if (rq1) {
-               desc[1] = execlists_ctx_descriptor(rq1);
+               desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring);
                rq1->elsp_submitted++;
        } else {
                desc[1] = 0;
        }
 
-       desc[0] = execlists_ctx_descriptor(rq0);
+       desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring);
        rq0->elsp_submitted++;
 
        /* You must always write both descriptors in the order below. */
@@ -324,7 +349,7 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
        I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
 
        /* ELSP is a wo register, use another nearby reg for posting */
-       POSTING_READ_FW(RING_EXECLIST_STATUS(ring));
+       POSTING_READ_FW(RING_EXECLIST_STATUS_LO(ring));
        intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
        spin_unlock(&dev_priv->uncore.lock);
 }
@@ -342,16 +367,18 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
        WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
        WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
 
-       page = i915_gem_object_get_page(ctx_obj, 1);
+       page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        reg_state = kmap_atomic(page);
 
        reg_state[CTX_RING_TAIL+1] = rq->tail;
        reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
 
-       /* True PPGTT with dynamic page allocation: update PDP registers and
-        * point the unallocated PDPs to the scratch page
-        */
-       if (ppgtt) {
+       if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+               /* True 32b PPGTT with dynamic page allocation: update PDP
+                * registers and point the unallocated PDPs to scratch page.
+                * PML4 is allocated during ppgtt init, so this is not needed
+                * in 48-bit mode.
+                */
                ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
                ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
                ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
@@ -477,7 +504,7 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
        u32 status_pointer;
        u8 read_pointer;
        u8 write_pointer;
-       u32 status;
+       u32 status = 0;
        u32 status_id;
        u32 submit_contexts = 0;
 
@@ -492,10 +519,8 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 
        while (read_pointer < write_pointer) {
                read_pointer++;
-               status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
-                               (read_pointer % GEN8_CSB_ENTRIES) * 8);
-               status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
-                               (read_pointer % GEN8_CSB_ENTRIES) * 8 + 4);
+               status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer % GEN8_CSB_ENTRIES));
+               status_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer % GEN8_CSB_ENTRIES));
 
                if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
                        continue;
@@ -515,8 +540,14 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
                }
        }
 
-       if (submit_contexts != 0)
+       if (disable_lite_restore_wa(ring)) {
+               /* Prevent a ctx to preempt itself */
+               if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
+                   (submit_contexts != 0))
+                       execlists_context_unqueue(ring);
+       } else if (submit_contexts != 0) {
                execlists_context_unqueue(ring);
+       }
 
        spin_unlock(&ring->execlist_lock);
 
@@ -540,8 +571,6 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
 
        i915_gem_request_reference(request);
 
-       request->tail = request->ringbuf->tail;
-
        spin_lock_irq(&ring->execlist_lock);
 
        list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
@@ -694,13 +723,19 @@ static void
 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
        struct intel_engine_cs *ring = request->ring;
+       struct drm_i915_private *dev_priv = request->i915;
 
        intel_logical_ring_advance(request->ringbuf);
 
+       request->tail = request->ringbuf->tail;
+
        if (intel_ring_stopped(ring))
                return;
 
-       execlists_context_queue(request);
+       if (dev_priv->guc.execbuf_client)
+               i915_guc_submit(dev_priv->guc.execbuf_client, request);
+       else
+               execlists_context_queue(request);
 }
 
 static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
@@ -767,8 +802,7 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
 /**
  * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
  *
- * @request: The request to start some new work for
- * @ctx: Logical ring context whose ringbuffer is being prepared.
+ * @req: The request to start some new work for
  * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
  *
  * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
@@ -988,34 +1022,54 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
        return 0;
 }
 
+static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
+               struct drm_i915_gem_object *ctx_obj,
+               struct intel_ringbuffer *ringbuf)
+{
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret = 0;
+
+       WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+       ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
+                       PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+       if (ret)
+               return ret;
+
+       ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+       if (ret)
+               goto unpin_ctx_obj;
+
+       ctx_obj->dirty = true;
+
+       /* Invalidate GuC TLB. */
+       if (i915.enable_guc_submission)
+               I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+
+       return ret;
+
+unpin_ctx_obj:
+       i915_gem_object_ggtt_unpin(ctx_obj);
+
+       return ret;
+}
+
 static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 {
+       int ret = 0;
        struct intel_engine_cs *ring = rq->ring;
        struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
        struct intel_ringbuffer *ringbuf = rq->ringbuf;
-       int ret = 0;
 
-       WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
        if (rq->ctx->engine[ring->id].pin_count++ == 0) {
-               ret = i915_gem_obj_ggtt_pin(ctx_obj,
-                               GEN8_LR_CONTEXT_ALIGN, 0);
+               ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf);
                if (ret)
                        goto reset_pin_count;
-
-               ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
-               if (ret)
-                       goto unpin_ctx_obj;
-
-               ctx_obj->dirty = true;
        }
-
        return ret;
 
-unpin_ctx_obj:
-       i915_gem_object_ggtt_unpin(ctx_obj);
 reset_pin_count:
        rq->ctx->engine[ring->id].pin_count = 0;
-
        return ret;
 }
 
@@ -1113,7 +1167,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
        if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
                l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
 
-       wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8(1) |
+       wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
                                   MI_SRM_LRM_GLOBAL_GTT));
        wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
        wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
@@ -1131,7 +1185,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
        wa_ctx_emit(batch, index, 0);
        wa_ctx_emit(batch, index, 0);
 
-       wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8(1) |
+       wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
                                   MI_SRM_LRM_GLOBAL_GTT));
        wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
        wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
@@ -1200,9 +1254,10 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
 
        /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
        if (IS_BROADWELL(ring->dev)) {
-               index = gen8_emit_flush_coherentl3_wa(ring, batch, index);
-               if (index < 0)
-                       return index;
+               int rc = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+               if (rc < 0)
+                       return rc;
+               index = rc;
        }
 
        /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
@@ -1426,6 +1481,9 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u8 next_context_status_buffer_hw;
 
+       lrc_setup_hardware_status_page(ring,
+                               ring->default_context->engine[ring->id].state);
+
        I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
        I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
 
@@ -1542,12 +1600,16 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
         * Ideally, we should set Force PD Restore in ctx descriptor,
         * but we can't. Force Restore would be a second option, but
         * it is unsafe in case of lite-restore (because the ctx is
-        * not idle). */
+        * not idle). PML4 is allocated during ppgtt init so this is
+        * not needed in 48-bit.*/
        if (req->ctx->ppgtt &&
            (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
-               ret = intel_logical_ring_emit_pdps(req);
-               if (ret)
-                       return ret;
+               if (!USES_FULL_48BIT_PPGTT(req->i915) &&
+                   !intel_vgpu_active(req->i915->dev)) {
+                       ret = intel_logical_ring_emit_pdps(req);
+                       if (ret)
+                               return ret;
+               }
 
                req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
        }
@@ -1713,6 +1775,34 @@ static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
        intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
 }
 
+static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+{
+
+       /*
+        * On BXT A steppings there is a HW coherency issue whereby the
+        * MI_STORE_DATA_IMM storing the completed request's seqno
+        * occasionally doesn't invalidate the CPU cache. Work around this by
+        * clflushing the corresponding cacheline whenever the caller wants
+        * the coherency to be guaranteed. Note that this cacheline is known
+        * to be clean at this point, since we only write it in
+        * bxt_a_set_seqno(), where we also do a clflush after the write. So
+        * this clflush in practice becomes an invalidate operation.
+        */
+
+       if (!lazy_coherency)
+               intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
+
+       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+{
+       intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+
+       /* See bxt_a_get_seqno() explaining the reason for the clflush. */
+       intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
        struct intel_ringbuffer *ringbuf = request->ringbuf;
@@ -1855,7 +1945,21 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
        if (ret)
                return ret;
 
-       ret = intel_lr_context_deferred_create(ring->default_context, ring);
+       ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
+       if (ret)
+               return ret;
+
+       /* As this is the default context, always pin it */
+       ret = intel_lr_context_do_pin(
+                       ring,
+                       ring->default_context->engine[ring->id].state,
+                       ring->default_context->engine[ring->id].ringbuf);
+       if (ret) {
+               DRM_ERROR(
+                       "Failed to pin and map ringbuffer %s: %d\n",
+                       ring->name, ret);
+               return ret;
+       }
 
        return ret;
 }
@@ -1882,8 +1986,13 @@ static int logical_render_ring_init(struct drm_device *dev)
                ring->init_hw = gen8_init_render_ring;
        ring->init_context = gen8_init_rcs_context;
        ring->cleanup = intel_fini_pipe_control;
-       ring->get_seqno = gen8_get_seqno;
-       ring->set_seqno = gen8_set_seqno;
+       if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+               ring->get_seqno = bxt_a_get_seqno;
+               ring->set_seqno = bxt_a_set_seqno;
+       } else {
+               ring->get_seqno = gen8_get_seqno;
+               ring->set_seqno = gen8_set_seqno;
+       }
        ring->emit_request = gen8_emit_request;
        ring->emit_flush = gen8_emit_flush_render;
        ring->irq_get = gen8_logical_ring_get_irq;
@@ -1929,8 +2038,13 @@ static int logical_bsd_ring_init(struct drm_device *dev)
                GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 
        ring->init_hw = gen8_init_common_ring;
-       ring->get_seqno = gen8_get_seqno;
-       ring->set_seqno = gen8_set_seqno;
+       if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+               ring->get_seqno = bxt_a_get_seqno;
+               ring->set_seqno = bxt_a_set_seqno;
+       } else {
+               ring->get_seqno = gen8_get_seqno;
+               ring->set_seqno = gen8_set_seqno;
+       }
        ring->emit_request = gen8_emit_request;
        ring->emit_flush = gen8_emit_flush;
        ring->irq_get = gen8_logical_ring_get_irq;
@@ -1979,8 +2093,13 @@ static int logical_blt_ring_init(struct drm_device *dev)
                GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 
        ring->init_hw = gen8_init_common_ring;
-       ring->get_seqno = gen8_get_seqno;
-       ring->set_seqno = gen8_set_seqno;
+       if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+               ring->get_seqno = bxt_a_get_seqno;
+               ring->set_seqno = bxt_a_set_seqno;
+       } else {
+               ring->get_seqno = gen8_get_seqno;
+               ring->set_seqno = gen8_set_seqno;
+       }
        ring->emit_request = gen8_emit_request;
        ring->emit_flush = gen8_emit_flush;
        ring->irq_get = gen8_logical_ring_get_irq;
@@ -2004,8 +2123,13 @@ static int logical_vebox_ring_init(struct drm_device *dev)
                GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
 
        ring->init_hw = gen8_init_common_ring;
-       ring->get_seqno = gen8_get_seqno;
-       ring->set_seqno = gen8_set_seqno;
+       if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+               ring->get_seqno = bxt_a_get_seqno;
+               ring->set_seqno = bxt_a_set_seqno;
+       } else {
+               ring->get_seqno = gen8_get_seqno;
+               ring->set_seqno = gen8_set_seqno;
+       }
        ring->emit_request = gen8_emit_request;
        ring->emit_flush = gen8_emit_flush;
        ring->irq_get = gen8_logical_ring_get_irq;
@@ -2058,14 +2182,8 @@ int intel_logical_rings_init(struct drm_device *dev)
                        goto cleanup_vebox_ring;
        }
 
-       ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
-       if (ret)
-               goto cleanup_bsd2_ring;
-
        return 0;
 
-cleanup_bsd2_ring:
-       intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
 cleanup_vebox_ring:
        intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
 cleanup_blt_ring:
@@ -2151,7 +2269,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 
        /* The second page of the context object contains some fields which must
         * be set up prior to the first execution. */
-       page = i915_gem_object_get_page(ctx_obj, 1);
+       page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
        reg_state = kmap_atomic(page);
 
        /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
@@ -2228,13 +2346,24 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
        reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
        reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
 
-       /* With dynamic page allocation, PDPs may not be allocated at this point,
-        * Point the unallocated PDPs to the scratch page
-        */
-       ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
-       ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
-       ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
-       ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+       if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+               /* 64b PPGTT (48bit canonical)
+                * PDP0_DESCRIPTOR contains the base address to PML4 and
+                * other PDP Descriptors are ignored.
+                */
+               ASSIGN_CTX_PML4(ppgtt, reg_state);
+       } else {
+               /* 32b PPGTT
+                * PDP*_DESCRIPTOR contains the base address of space supported.
+                * With dynamic page allocation, PDPs may not be allocated at
+                * this point. Point the unallocated PDPs to the scratch page
+                */
+               ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
+               ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
+               ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
+               ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+       }
+
        if (ring->id == RCS) {
                reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
                reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;
@@ -2275,8 +2404,7 @@ void intel_lr_context_free(struct intel_context *ctx)
                                i915_gem_object_ggtt_unpin(ctx_obj);
                        }
                        WARN_ON(ctx->engine[ring->id].pin_count);
-                       intel_destroy_ringbuffer_obj(ringbuf);
-                       kfree(ringbuf);
+                       intel_ringbuffer_free(ringbuf);
                        drm_gem_object_unreference(&ctx_obj->base);
                }
        }
@@ -2310,12 +2438,13 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
                struct drm_i915_gem_object *default_ctx_obj)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct page *page;
 
-       /* The status page is offset 0 from the default context object
-        * in LRC mode. */
-       ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj);
-       ring->status_page.page_addr =
-                       kmap(sg_page(default_ctx_obj->pages->sgl));
+       /* The HWSP is part of the default context object in LRC mode. */
+       ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
+                       + LRC_PPHWSP_PN * PAGE_SIZE;
+       page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
+       ring->status_page.page_addr = kmap(page);
        ring->status_page.obj = default_ctx_obj;
 
        I915_WRITE(RING_HWS_PGA(ring->mmio_base),
@@ -2324,7 +2453,7 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
 }
 
 /**
- * intel_lr_context_deferred_create() - create the LRC specific bits of a context
+ * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
  * @ctx: LR context to create.
  * @ring: engine to be used with the context.
  *
@@ -2336,10 +2465,10 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
  *
  * Return: non-zero on error.
  */
-int intel_lr_context_deferred_create(struct intel_context *ctx,
+
+int intel_lr_context_deferred_alloc(struct intel_context *ctx,
                                     struct intel_engine_cs *ring)
 {
-       const bool is_global_default_ctx = (ctx == ring->default_context);
        struct drm_device *dev = ring->dev;
        struct drm_i915_gem_object *ctx_obj;
        uint32_t context_size;
@@ -2351,107 +2480,58 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 
        context_size = round_up(get_lr_context_size(ring), 4096);
 
+       /* One extra page as the sharing data between driver and GuC */
+       context_size += PAGE_SIZE * LRC_PPHWSP_PN;
+
        ctx_obj = i915_gem_alloc_object(dev, context_size);
        if (!ctx_obj) {
                DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
                return -ENOMEM;
        }
 
-       if (is_global_default_ctx) {
-               ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
-               if (ret) {
-                       DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
-                                       ret);
-                       drm_gem_object_unreference(&ctx_obj->base);
-                       return ret;
-               }
-       }
-
-       ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
-       if (!ringbuf) {
-               DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
-                               ring->name);
-               ret = -ENOMEM;
-               goto error_unpin_ctx;
-       }
-
-       ringbuf->ring = ring;
-
-       ringbuf->size = 32 * PAGE_SIZE;
-       ringbuf->effective_size = ringbuf->size;
-       ringbuf->head = 0;
-       ringbuf->tail = 0;
-       ringbuf->last_retired_head = -1;
-       intel_ring_update_space(ringbuf);
-
-       if (ringbuf->obj == NULL) {
-               ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-               if (ret) {
-                       DRM_DEBUG_DRIVER(
-                               "Failed to allocate ringbuffer obj %s: %d\n",
-                               ring->name, ret);
-                       goto error_free_rbuf;
-               }
-
-               if (is_global_default_ctx) {
-                       ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
-                       if (ret) {
-                               DRM_ERROR(
-                                       "Failed to pin and map ringbuffer %s: %d\n",
-                                       ring->name, ret);
-                               goto error_destroy_rbuf;
-                       }
-               }
-
+       ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE);
+       if (IS_ERR(ringbuf)) {
+               ret = PTR_ERR(ringbuf);
+               goto error_deref_obj;
        }
 
        ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
        if (ret) {
                DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
-               goto error;
+               goto error_ringbuf;
        }
 
        ctx->engine[ring->id].ringbuf = ringbuf;
        ctx->engine[ring->id].state = ctx_obj;
 
-       if (ctx == ring->default_context)
-               lrc_setup_hardware_status_page(ring, ctx_obj);
-       else if (ring->id == RCS && !ctx->rcs_initialized) {
-               if (ring->init_context) {
-                       struct drm_i915_gem_request *req;
+       if (ctx != ring->default_context && ring->init_context) {
+               struct drm_i915_gem_request *req;
 
-                       ret = i915_gem_request_alloc(ring, ctx, &req);
-                       if (ret)
-                               return ret;
-
-                       ret = ring->init_context(req);
-                       if (ret) {
-                               DRM_ERROR("ring init context: %d\n", ret);
-                               i915_gem_request_cancel(req);
-                               ctx->engine[ring->id].ringbuf = NULL;
-                               ctx->engine[ring->id].state = NULL;
-                               goto error;
-                       }
-
-                       i915_add_request_no_flush(req);
+               ret = i915_gem_request_alloc(ring,
+                       ctx, &req);
+               if (ret) {
+                       DRM_ERROR("ring create req: %d\n",
+                               ret);
+                       goto error_ringbuf;
                }
 
-               ctx->rcs_initialized = true;
+               ret = ring->init_context(req);
+               if (ret) {
+                       DRM_ERROR("ring init context: %d\n",
+                               ret);
+                       i915_gem_request_cancel(req);
+                       goto error_ringbuf;
+               }
+               i915_add_request_no_flush(req);
        }
-
        return 0;
 
-error:
-       if (is_global_default_ctx)
-               intel_unpin_ringbuffer_obj(ringbuf);
-error_destroy_rbuf:
-       intel_destroy_ringbuffer_obj(ringbuf);
-error_free_rbuf:
-       kfree(ringbuf);
-error_unpin_ctx:
-       if (is_global_default_ctx)
-               i915_gem_object_ggtt_unpin(ctx_obj);
+error_ringbuf:
+       intel_ringbuffer_free(ringbuf);
+error_deref_obj:
        drm_gem_object_unreference(&ctx_obj->base);
+       ctx->engine[ring->id].ringbuf = NULL;
+       ctx->engine[ring->id].state = NULL;
        return ret;
 }
 
@@ -2477,7 +2557,7 @@ void intel_lr_context_reset(struct drm_device *dev,
                        WARN(1, "Failed get_pages for context obj\n");
                        continue;
                }
-               page = i915_gem_object_get_page(ctx_obj, 1);
+               page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
                reg_state = kmap_atomic(page);
 
                reg_state[CTX_RING_HEAD+1] = 0;
index 3c63bb32ad81c657e418b1b7143ed934d036c66f..4e60d54ba66de8f0d1850ab1eb9276f381986090 100644 (file)
 
 /* Execlists regs */
 #define RING_ELSP(ring)                        ((ring)->mmio_base+0x230)
-#define RING_EXECLIST_STATUS(ring)     ((ring)->mmio_base+0x234)
+#define RING_EXECLIST_STATUS_LO(ring)  ((ring)->mmio_base+0x234)
+#define RING_EXECLIST_STATUS_HI(ring)  ((ring)->mmio_base+0x234 + 4)
 #define RING_CONTEXT_CONTROL(ring)     ((ring)->mmio_base+0x244)
 #define          CTX_CTRL_INHIBIT_SYN_CTX_SWITCH       (1 << 3)
 #define          CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT   (1 << 0)
 #define   CTX_CTRL_RS_CTX_ENABLE                (1 << 1)
-#define RING_CONTEXT_STATUS_BUF(ring)  ((ring)->mmio_base+0x370)
+#define RING_CONTEXT_STATUS_BUF_LO(ring, i)    ((ring)->mmio_base+0x370 + (i) * 8)
+#define RING_CONTEXT_STATUS_BUF_HI(ring, i)    ((ring)->mmio_base+0x370 + (i) * 8 + 4)
 #define RING_CONTEXT_STATUS_PTR(ring)  ((ring)->mmio_base+0x3a0)
 
 /* Logical Rings */
@@ -70,12 +72,20 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
 }
 
 /* Logical Ring Contexts */
+
+/* One extra page is added before LRC for GuC as shared data */
+#define LRC_GUCSHR_PN  (0)
+#define LRC_PPHWSP_PN  (LRC_GUCSHR_PN + 1)
+#define LRC_STATE_PN   (LRC_PPHWSP_PN + 1)
+
 void intel_lr_context_free(struct intel_context *ctx);
-int intel_lr_context_deferred_create(struct intel_context *ctx,
-                                    struct intel_engine_cs *ring);
+int intel_lr_context_deferred_alloc(struct intel_context *ctx,
+                                   struct intel_engine_cs *ring);
 void intel_lr_context_unpin(struct drm_i915_gem_request *req);
 void intel_lr_context_reset(struct drm_device *dev,
                        struct intel_context *ctx);
+uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
+                                    struct intel_engine_cs *ring);
 
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
index 881b5d13592ef8075e64786c68e31050f6741d94..2c2d1f0737c8d70f0daff9a049627fdb4c1015d2 100644 (file)
@@ -289,11 +289,14 @@ intel_lvds_mode_valid(struct drm_connector *connector,
 {
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+       int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
 
        if (mode->hdisplay > fixed_mode->hdisplay)
                return MODE_PANEL;
        if (mode->vdisplay > fixed_mode->vdisplay)
                return MODE_PANEL;
+       if (fixed_mode->clock > max_pixclk)
+               return MODE_CLOCK_HIGH;
 
        return MODE_OK;
 }
@@ -952,7 +955,7 @@ void intel_lvds_init(struct drm_device *dev)
        if (HAS_PCH_SPLIT(dev)) {
                I915_WRITE(PCH_PP_CONTROL,
                           I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
-       } else {
+       } else if (INTEL_INFO(dev_priv)->gen < 5) {
                I915_WRITE(PP_CONTROL,
                           I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
        }
@@ -982,6 +985,18 @@ void intel_lvds_init(struct drm_device *dev)
                DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
        }
 
+        /* Set the Panel Power On/Off timings if uninitialized. */
+       if (INTEL_INFO(dev_priv)->gen < 5 &&
+           I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
+               /* Set T2 to 40ms and T5 to 200ms */
+               I915_WRITE(PP_ON_DELAYS, 0x019007d0);
+
+               /* Set T3 to 35ms and Tx to 200ms */
+               I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
+
+               DRM_DEBUG_KMS("Panel power timings uninitialized, setting defaults\n");
+       }
+
        lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
        if (!lvds_encoder)
                return;
index e2ab3f6ed0222b9728f2a463f241a700cdc62e41..2c11b4eedfc67355706e0556e58df79e8424a71d 100644 (file)
@@ -484,7 +484,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
        return val;
 }
 
-static u32 bdw_get_backlight(struct intel_connector *connector)
+static u32 lpt_get_backlight(struct intel_connector *connector)
 {
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -576,7 +576,7 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
        return val;
 }
 
-static void bdw_set_backlight(struct intel_connector *connector, u32 level)
+static void lpt_set_backlight(struct intel_connector *connector, u32 level)
 {
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -729,6 +729,18 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
        mutex_unlock(&dev_priv->backlight_lock);
 }
 
+static void lpt_disable_backlight(struct intel_connector *connector)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 tmp;
+
+       intel_panel_actually_set_backlight(connector, 0);
+
+       tmp = I915_READ(BLC_PWM_PCH_CTL1);
+       I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+}
+
 static void pch_disable_backlight(struct intel_connector *connector)
 {
        struct drm_device *dev = connector->base.dev;
@@ -809,7 +821,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
                return;
 
        /*
-        * Do not disable backlight on the vgaswitcheroo path. When switching
+        * Do not disable backlight on the vga_switcheroo path. When switching
         * away from i915, the other client may depend on i915 to handle the
         * backlight. This will leave the backlight on unnecessarily when
         * another client is not activated.
@@ -829,7 +841,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
        mutex_unlock(&dev_priv->backlight_lock);
 }
 
-static void bdw_enable_backlight(struct intel_connector *connector)
+static void lpt_enable_backlight(struct intel_connector *connector)
 {
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1212,10 +1224,149 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
 #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
 
 /*
- * Note: The setup hooks can't assume pipe is set!
+ * SPT: This value represents the period of the PWM stream in clock periods
+ * multiplied by 16 (default increment) or 128 (alternate increment selected in
+ * SCHICKEN_1 bit 0). PWM clock is 24 MHz.
+ */
+static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 mul, clock;
+
+       if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY)
+               mul = 128;
+       else
+               mul = 16;
+
+       clock = MHz(24);
+
+       return clock / (pwm_freq_hz * mul);
+}
+
+/*
+ * LPT: This value represents the period of the PWM stream in clock periods
+ * multiplied by 128 (default increment) or 16 (alternate increment, selected in
+ * LPT SOUTH_CHICKEN2 register bit 5).
+ */
+static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 mul, clock;
+
+       if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY)
+               mul = 16;
+       else
+               mul = 128;
+
+       if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
+               clock = MHz(135); /* LPT:H */
+       else
+               clock = MHz(24); /* LPT:LP */
+
+       return clock / (pwm_freq_hz * mul);
+}
+
+/*
+ * ILK/SNB/IVB: This value represents the period of the PWM stream in PCH
+ * display raw clocks multiplied by 128.
+ */
+static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+       struct drm_device *dev = connector->base.dev;
+       int clock = MHz(intel_pch_rawclk(dev));
+
+       return clock / (pwm_freq_hz * 128);
+}
+
+/*
+ * Gen2: This field determines the number of time base events (display core
+ * clock frequency/32) in total for a complete cycle of modulated backlight
+ * control.
  *
- * XXX: Query mode clock or hardware clock and program PWM modulation frequency
- * appropriately when it's 0. Use VBT and/or sane defaults.
+ * Gen3: A time base event equals the display core clock ([DevPNV] HRAW clock)
+ * divided by 32.
+ */
+static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int clock;
+
+       if (IS_PINEVIEW(dev))
+               clock = intel_hrawclk(dev);
+       else
+               clock = 1000 * dev_priv->display.get_display_clock_speed(dev);
+
+       return clock / (pwm_freq_hz * 32);
+}
+
+/*
+ * Gen4: This value represents the period of the PWM stream in display core
+ * clocks multiplied by 128.
+ */
+static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int clock = 1000 * dev_priv->display.get_display_clock_speed(dev);
+
+       return clock / (pwm_freq_hz * 128);
+}
+
+/*
+ * VLV: This value represents the period of the PWM stream in display core
+ * clocks ([DevCTG] 200MHz HRAW clocks) multiplied by 128 or 25MHz S0IX clocks
+ * multiplied by 16. CHV uses a 19.2MHz S0IX clock.
+ */
+static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int clock;
+
+       if ((I915_READ(CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
+               if (IS_CHERRYVIEW(dev))
+                       return KHz(19200) / (pwm_freq_hz * 16);
+               else
+                       return MHz(25) / (pwm_freq_hz * 16);
+       } else {
+               clock = intel_hrawclk(dev);
+               return MHz(clock) / (pwm_freq_hz * 128);
+       }
+}
+
+static u32 get_backlight_max_vbt(struct intel_connector *connector)
+{
+       struct drm_device *dev = connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
+       u32 pwm;
+
+       if (!pwm_freq_hz) {
+               DRM_DEBUG_KMS("backlight frequency not specified in VBT\n");
+               return 0;
+       }
+
+       if (!dev_priv->display.backlight_hz_to_pwm) {
+               DRM_DEBUG_KMS("backlight frequency setting from VBT currently not supported on this platform\n");
+               return 0;
+       }
+
+       pwm = dev_priv->display.backlight_hz_to_pwm(connector, pwm_freq_hz);
+       if (!pwm) {
+               DRM_DEBUG_KMS("backlight frequency conversion failed\n");
+               return 0;
+       }
+
+       DRM_DEBUG_KMS("backlight frequency %u Hz from VBT\n", pwm_freq_hz);
+
+       return pwm;
+}
+
+/*
+ * Note: The setup hooks can't assume pipe is set!
  */
 static u32 get_backlight_min_vbt(struct intel_connector *connector)
 {
@@ -1243,7 +1394,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
        return scale(min, 0, 255, 0, panel->backlight.max);
 }
 
-static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unused)
+static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
 {
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1255,12 +1406,16 @@ static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unus
 
        pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
        panel->backlight.max = pch_ctl2 >> 16;
+
+       if (!panel->backlight.max)
+               panel->backlight.max = get_backlight_max_vbt(connector);
+
        if (!panel->backlight.max)
                return -ENODEV;
 
        panel->backlight.min = get_backlight_min_vbt(connector);
 
-       val = bdw_get_backlight(connector);
+       val = lpt_get_backlight(connector);
        panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
        panel->backlight.enabled = (pch_ctl1 & BLM_PCH_PWM_ENABLE) &&
@@ -1281,6 +1436,10 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
 
        pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
        panel->backlight.max = pch_ctl2 >> 16;
+
+       if (!panel->backlight.max)
+               panel->backlight.max = get_backlight_max_vbt(connector);
+
        if (!panel->backlight.max)
                return -ENODEV;
 
@@ -1312,12 +1471,18 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
                panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
 
        panel->backlight.max = ctl >> 17;
-       if (panel->backlight.combination_mode)
-               panel->backlight.max *= 0xff;
+
+       if (!panel->backlight.max) {
+               panel->backlight.max = get_backlight_max_vbt(connector);
+               panel->backlight.max >>= 1;
+       }
 
        if (!panel->backlight.max)
                return -ENODEV;
 
+       if (panel->backlight.combination_mode)
+               panel->backlight.max *= 0xff;
+
        panel->backlight.min = get_backlight_min_vbt(connector);
 
        val = i9xx_get_backlight(connector);
@@ -1341,12 +1506,16 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
 
        ctl = I915_READ(BLC_PWM_CTL);
        panel->backlight.max = ctl >> 16;
-       if (panel->backlight.combination_mode)
-               panel->backlight.max *= 0xff;
+
+       if (!panel->backlight.max)
+               panel->backlight.max = get_backlight_max_vbt(connector);
 
        if (!panel->backlight.max)
                return -ENODEV;
 
+       if (panel->backlight.combination_mode)
+               panel->backlight.max *= 0xff;
+
        panel->backlight.min = get_backlight_min_vbt(connector);
 
        val = i9xx_get_backlight(connector);
@@ -1363,21 +1532,8 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
        struct drm_device *dev = connector->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_panel *panel = &connector->panel;
-       enum pipe p;
        u32 ctl, ctl2, val;
 
-       for_each_pipe(dev_priv, p) {
-               u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(p));
-
-               /* Skip if the modulation freq is already set */
-               if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
-                       continue;
-
-               cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
-               I915_WRITE(VLV_BLC_PWM_CTL(p), (0xf42 << 16) |
-                          cur_val);
-       }
-
        if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
                return -ENODEV;
 
@@ -1386,6 +1542,10 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
 
        ctl = I915_READ(VLV_BLC_PWM_CTL(pipe));
        panel->backlight.max = ctl >> 16;
+
+       if (!panel->backlight.max)
+               panel->backlight.max = get_backlight_max_vbt(connector);
+
        if (!panel->backlight.max)
                return -ENODEV;
 
@@ -1412,6 +1572,10 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
        panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
 
        panel->backlight.max = I915_READ(BXT_BLC_PWM_FREQ1);
+
+       if (!panel->backlight.max)
+               panel->backlight.max = get_backlight_max_vbt(connector);
+
        if (!panel->backlight.max)
                return -ENODEV;
 
@@ -1519,18 +1683,23 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
                dev_priv->display.disable_backlight = bxt_disable_backlight;
                dev_priv->display.set_backlight = bxt_set_backlight;
                dev_priv->display.get_backlight = bxt_get_backlight;
-       } else if (IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
-               dev_priv->display.setup_backlight = bdw_setup_backlight;
-               dev_priv->display.enable_backlight = bdw_enable_backlight;
-               dev_priv->display.disable_backlight = pch_disable_backlight;
-               dev_priv->display.set_backlight = bdw_set_backlight;
-               dev_priv->display.get_backlight = bdw_get_backlight;
+       } else if (HAS_PCH_LPT(dev) || HAS_PCH_SPT(dev)) {
+               dev_priv->display.setup_backlight = lpt_setup_backlight;
+               dev_priv->display.enable_backlight = lpt_enable_backlight;
+               dev_priv->display.disable_backlight = lpt_disable_backlight;
+               dev_priv->display.set_backlight = lpt_set_backlight;
+               dev_priv->display.get_backlight = lpt_get_backlight;
+               if (HAS_PCH_LPT(dev))
+                       dev_priv->display.backlight_hz_to_pwm = lpt_hz_to_pwm;
+               else
+                       dev_priv->display.backlight_hz_to_pwm = spt_hz_to_pwm;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev_priv->display.setup_backlight = pch_setup_backlight;
                dev_priv->display.enable_backlight = pch_enable_backlight;
                dev_priv->display.disable_backlight = pch_disable_backlight;
                dev_priv->display.set_backlight = pch_set_backlight;
                dev_priv->display.get_backlight = pch_get_backlight;
+               dev_priv->display.backlight_hz_to_pwm = pch_hz_to_pwm;
        } else if (IS_VALLEYVIEW(dev)) {
                if (dev_priv->vbt.has_mipi) {
                        dev_priv->display.setup_backlight = pwm_setup_backlight;
@@ -1544,6 +1713,7 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
                        dev_priv->display.disable_backlight = vlv_disable_backlight;
                        dev_priv->display.set_backlight = vlv_set_backlight;
                        dev_priv->display.get_backlight = vlv_get_backlight;
+                       dev_priv->display.backlight_hz_to_pwm = vlv_hz_to_pwm;
                }
        } else if (IS_GEN4(dev)) {
                dev_priv->display.setup_backlight = i965_setup_backlight;
@@ -1551,12 +1721,14 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
                dev_priv->display.disable_backlight = i965_disable_backlight;
                dev_priv->display.set_backlight = i9xx_set_backlight;
                dev_priv->display.get_backlight = i9xx_get_backlight;
+               dev_priv->display.backlight_hz_to_pwm = i965_hz_to_pwm;
        } else {
                dev_priv->display.setup_backlight = i9xx_setup_backlight;
                dev_priv->display.enable_backlight = i9xx_enable_backlight;
                dev_priv->display.disable_backlight = i9xx_disable_backlight;
                dev_priv->display.set_backlight = i9xx_set_backlight;
                dev_priv->display.get_backlight = i9xx_get_backlight;
+               dev_priv->display.backlight_hz_to_pwm = i9xx_hz_to_pwm;
        }
 }
 
index ddbb7ed0a193229355700926006578ca5f06b937..ab5ac5ee18257d8b05ee175dfd474e39fb61731c 100644 (file)
@@ -116,18 +116,30 @@ static void bxt_init_clock_gating(struct drm_device *dev)
 
        gen9_init_clock_gating(dev);
 
+       /* WaDisableSDEUnitClockGating:bxt */
+       I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+                  GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
        /*
         * FIXME:
-        * GEN8_SDEUNIT_CLOCK_GATE_DISABLE applies on A0 only.
         * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
         */
-        /* WaDisableSDEUnitClockGating:bxt */
        I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
-                  GEN8_SDEUNIT_CLOCK_GATE_DISABLE |
                   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
 
-       /* FIXME: apply on A0 only */
-       I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
+       if (INTEL_REVID(dev) == BXT_REVID_A0) {
+               /*
+                * Hardware specification requires this bit to be
+                * set to 1 for A0
+                */
+               I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
+       }
+
+       /* WaSetClckGatingDisableMedia:bxt */
+       if (INTEL_REVID(dev) == BXT_REVID_A0) {
+               I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
+                                           ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
+       }
 }
 
 static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@ -3166,7 +3178,8 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
                if (fb) {
                        p->plane[0].enabled = true;
                        p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
-                               drm_format_plane_cpp(fb->pixel_format, 1) : fb->bits_per_pixel / 8;
+                               drm_format_plane_cpp(fb->pixel_format, 1) :
+                               drm_format_plane_cpp(fb->pixel_format, 0);
                        p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
                                drm_format_plane_cpp(fb->pixel_format, 0) : 0;
                        p->plane[0].tiling = fb->modifier[0];
@@ -3672,6 +3685,26 @@ static void skl_update_other_pipe_wm(struct drm_device *dev,
        }
 }
 
+static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe)
+{
+       watermarks->wm_linetime[pipe] = 0;
+       memset(watermarks->plane[pipe], 0,
+              sizeof(uint32_t) * 8 * I915_MAX_PLANES);
+       memset(watermarks->cursor[pipe], 0, sizeof(uint32_t) * 8);
+       memset(watermarks->plane_trans[pipe],
+              0, sizeof(uint32_t) * I915_MAX_PLANES);
+       watermarks->cursor_trans[pipe] = 0;
+
+       /* Clear ddb entries for pipe */
+       memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry));
+       memset(&watermarks->ddb.plane[pipe], 0,
+              sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
+       memset(&watermarks->ddb.y_plane[pipe], 0,
+              sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
+       memset(&watermarks->ddb.cursor[pipe], 0, sizeof(struct skl_ddb_entry));
+
+}
+
 static void skl_update_wm(struct drm_crtc *crtc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -3682,7 +3715,11 @@ static void skl_update_wm(struct drm_crtc *crtc)
        struct skl_pipe_wm pipe_wm = {};
        struct intel_wm_config config = {};
 
-       memset(results, 0, sizeof(*results));
+
+       /* Clear all dirty flags */
+       memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
+
+       skl_clear_wm(results, intel_crtc->pipe);
 
        skl_compute_wm_global_parameters(dev, &config);
 
@@ -4261,7 +4298,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
        fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
                MEMMODE_FSTART_SHIFT;
 
-       vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+       vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
                PXVFREQ_PX_SHIFT;
 
        dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
@@ -4292,10 +4329,10 @@ static void ironlake_enable_drps(struct drm_device *dev)
 
        ironlake_set_drps(dev, fstart);
 
-       dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
-               I915_READ(0x112e0);
+       dev_priv->ips.last_count1 = I915_READ(DMIEC) +
+               I915_READ(DDREC) + I915_READ(CSIEC);
        dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
-       dev_priv->ips.last_count2 = I915_READ(0x112f4);
+       dev_priv->ips.last_count2 = I915_READ(GFXEC);
        dev_priv->ips.last_time2 = ktime_get_raw_ns();
 
        spin_unlock_irq(&mchdev_lock);
@@ -4466,6 +4503,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
+       if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0))
+               return;
+
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
        WARN_ON(val > dev_priv->rps.max_freq);
        WARN_ON(val < dev_priv->rps.min_freq);
@@ -4786,6 +4827,12 @@ static void gen9_enable_rps(struct drm_device *dev)
 
        gen6_init_rps_frequencies(dev);
 
+       /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
+       if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) {
+               intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+               return;
+       }
+
        /* Program defaults and thresholds for RPS*/
        I915_WRITE(GEN6_RC_VIDEO_FREQ,
                GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
@@ -4823,11 +4870,21 @@ static void gen9_enable_rc6(struct drm_device *dev)
        I915_WRITE(GEN6_RC_CONTROL, 0);
 
        /* 2b: Program RC6 thresholds.*/
-       I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
+
+       /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
+       if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) &&
+                                (INTEL_REVID(dev) <= SKL_REVID_E0)))
+               I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
+       else
+               I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
        I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
        I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
        for_each_ring(ring, dev_priv, unused)
                I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+
+       if (HAS_GUC_UCODE(dev))
+               I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
+
        I915_WRITE(GEN6_RC_SLEEP, 0);
        I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
 
@@ -4840,17 +4897,27 @@ static void gen9_enable_rc6(struct drm_device *dev)
                rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
        DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
                        "on" : "off");
-       I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
-                                  GEN6_RC_CTL_EI_MODE(1) |
-                                  rc6_mask);
+
+       if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_D0) ||
+           (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A0))
+               I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+                          GEN7_RC_CTL_TO_MODE |
+                          rc6_mask);
+       else
+               I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+                          GEN6_RC_CTL_EI_MODE(1) |
+                          rc6_mask);
 
        /*
         * 3b: Enable Coarse Power Gating only when RC6 is enabled.
-        * WaDisableRenderPowerGating:skl,bxt - Render PG need to be disabled with RC6.
+        * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
         */
-       I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
-                       GEN9_MEDIA_PG_ENABLE : 0);
-
+       if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
+           ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
+               I915_WRITE(GEN9_PG_ENABLE, 0);
+       else
+               I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
+                               (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
@@ -5565,7 +5632,7 @@ static void cherryview_enable_rps(struct drm_device *dev)
        /* RPS code assumes GPLL is used */
        WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
 
-       DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
+       DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
        DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
        dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@@ -5655,7 +5722,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
        /* RPS code assumes GPLL is used */
        WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
 
-       DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
+       DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
        DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
        dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@@ -5864,7 +5931,7 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
 
        assert_spin_locked(&mchdev_lock);
 
-       pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
+       pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
        pxvid = (pxvid >> 24) & 0x7f;
        ext_v = pvid_to_extvid(dev_priv, pxvid);
 
@@ -6107,13 +6174,13 @@ static void intel_init_emon(struct drm_device *dev)
        I915_WRITE(CSIEW2, 0x04000004);
 
        for (i = 0; i < 5; i++)
-               I915_WRITE(PEW + (i * 4), 0);
+               I915_WRITE(PEW(i), 0);
        for (i = 0; i < 3; i++)
-               I915_WRITE(DEW + (i * 4), 0);
+               I915_WRITE(DEW(i), 0);
 
        /* Program P-state weights to account for frequency power adjustment */
        for (i = 0; i < 16; i++) {
-               u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
+               u32 pxvidfreq = I915_READ(PXVFREQ(i));
                unsigned long freq = intel_pxfreq(pxvidfreq);
                unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
                        PXVFREQ_PX_SHIFT;
@@ -6134,7 +6201,7 @@ static void intel_init_emon(struct drm_device *dev)
        for (i = 0; i < 4; i++) {
                u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
                        (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
-               I915_WRITE(PXW + (i * 4), val);
+               I915_WRITE(PXW(i), val);
        }
 
        /* Adjust magic regs to magic values (more experimental results) */
@@ -6150,7 +6217,7 @@ static void intel_init_emon(struct drm_device *dev)
        I915_WRITE(EG7, 0);
 
        for (i = 0; i < 8; i++)
-               I915_WRITE(PXWL + (i * 4), 0);
+               I915_WRITE(PXWL(i), 0);
 
        /* Enable PMON + select events */
        I915_WRITE(ECR, 0x80000019);
@@ -6604,7 +6671,7 @@ static void lpt_init_clock_gating(struct drm_device *dev)
         * TODO: this bit should only be enabled when really needed, then
         * disabled when not needed anymore in order to save power.
         */
-       if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+       if (HAS_PCH_LPT_LP(dev))
                I915_WRITE(SOUTH_DSPCLK_GATE_D,
                           I915_READ(SOUTH_DSPCLK_GATE_D) |
                           PCH_LP_PARTITION_LEVEL_DISABLE);
@@ -6619,7 +6686,7 @@ static void lpt_suspend_hw(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+       if (HAS_PCH_LPT_LP(dev)) {
                uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
 
                val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
index 6e6b8db996ef2450c615a71ef10b7ffcbbc62479..16a4eada60a1b718d3db5634f6ac74fff1c0ce07 100644 (file)
@@ -983,6 +983,16 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
                tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
        WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
 
+       /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
+       if (IS_SKYLAKE(dev) ||
+           (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_B0)) {
+               WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+                                 GEN8_SAMPLER_POWER_BYPASS_DIS);
+       }
+
+       /* WaDisableSTUnitPowerOptimization:skl,bxt */
+       WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
+
        return 0;
 }
 
@@ -1996,14 +2006,14 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
        return 0;
 }
 
-void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
        drm_gem_object_unreference(&ringbuf->obj->base);
        ringbuf->obj = NULL;
 }
 
-int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-                              struct intel_ringbuffer *ringbuf)
+static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+                                     struct intel_ringbuffer *ringbuf)
 {
        struct drm_i915_gem_object *obj;
 
@@ -2023,6 +2033,48 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
        return 0;
 }
 
+struct intel_ringbuffer *
+intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
+{
+       struct intel_ringbuffer *ring;
+       int ret;
+
+       ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+       if (ring == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       ring->ring = engine;
+
+       ring->size = size;
+       /* Workaround an erratum on the i830 which causes a hang if
+        * the TAIL pointer points to within the last 2 cachelines
+        * of the buffer.
+        */
+       ring->effective_size = size;
+       if (IS_I830(engine->dev) || IS_845G(engine->dev))
+               ring->effective_size -= 2 * CACHELINE_BYTES;
+
+       ring->last_retired_head = -1;
+       intel_ring_update_space(ring);
+
+       ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
+       if (ret) {
+               DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+                         engine->name, ret);
+               kfree(ring);
+               return ERR_PTR(ret);
+       }
+
+       return ring;
+}
+
+void
+intel_ringbuffer_free(struct intel_ringbuffer *ring)
+{
+       intel_destroy_ringbuffer_obj(ring);
+       kfree(ring);
+}
+
 static int intel_init_ring_buffer(struct drm_device *dev,
                                  struct intel_engine_cs *ring)
 {
@@ -2031,22 +2083,20 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
        WARN_ON(ring->buffer);
 
-       ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
-       if (!ringbuf)
-               return -ENOMEM;
-       ring->buffer = ringbuf;
-
        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        INIT_LIST_HEAD(&ring->execlist_queue);
        i915_gem_batch_pool_init(dev, &ring->batch_pool);
-       ringbuf->size = 32 * PAGE_SIZE;
-       ringbuf->ring = ring;
        memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
        init_waitqueue_head(&ring->irq_queue);
 
+       ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
+       if (IS_ERR(ringbuf))
+               return PTR_ERR(ringbuf);
+       ring->buffer = ringbuf;
+
        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(ring);
                if (ret)
@@ -2058,15 +2108,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
                        goto error;
        }
 
-       WARN_ON(ringbuf->obj);
-
-       ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-       if (ret) {
-               DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
-                               ring->name, ret);
-               goto error;
-       }
-
        ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
        if (ret) {
                DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
@@ -2075,14 +2116,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
                goto error;
        }
 
-       /* Workaround an erratum on the i830 which causes a hang if
-        * the TAIL pointer points to within the last 2 cachelines
-        * of the buffer.
-        */
-       ringbuf->effective_size = ringbuf->size;
-       if (IS_I830(dev) || IS_845G(dev))
-               ringbuf->effective_size -= 2 * CACHELINE_BYTES;
-
        ret = i915_cmd_parser_init_ring(ring);
        if (ret)
                goto error;
@@ -2090,7 +2123,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
        return 0;
 
 error:
-       kfree(ringbuf);
+       intel_ringbuffer_free(ringbuf);
        ring->buffer = NULL;
        return ret;
 }
@@ -2098,19 +2131,18 @@ error:
 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv;
-       struct intel_ringbuffer *ringbuf;
 
        if (!intel_ring_initialized(ring))
                return;
 
        dev_priv = to_i915(ring->dev);
-       ringbuf = ring->buffer;
 
        intel_stop_ring_buffer(ring);
        WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
-       intel_unpin_ringbuffer_obj(ringbuf);
-       intel_destroy_ringbuffer_obj(ringbuf);
+       intel_unpin_ringbuffer_obj(ring->buffer);
+       intel_ringbuffer_free(ring->buffer);
+       ring->buffer = NULL;
 
        if (ring->cleanup)
                ring->cleanup(ring);
@@ -2119,9 +2151,6 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 
        i915_cmd_parser_fini_ring(ring);
        i915_gem_batch_pool_fini(&ring->batch_pool);
-
-       kfree(ringbuf);
-       ring->buffer = NULL;
 }
 
 static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
index 2e85fda949638079d2c7c4c9f8ac9daf39f97f5f..49fa41dc0eb66af4ee7ba10c15f6948ad84bb079 100644 (file)
@@ -377,6 +377,13 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
        return idx;
 }
 
+static inline void
+intel_flush_status_page(struct intel_engine_cs *ring, int reg)
+{
+       drm_clflush_virt_range(&ring->status_page.page_addr[reg],
+                              sizeof(uint32_t));
+}
+
 static inline u32
 intel_read_status_page(struct intel_engine_cs *ring,
                       int reg)
@@ -413,12 +420,12 @@ intel_write_status_page(struct intel_engine_cs *ring,
 #define I915_GEM_HWS_SCRATCH_INDEX     0x40
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+struct intel_ringbuffer *
+intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                                     struct intel_ringbuffer *ringbuf);
-void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
-int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-                              struct intel_ringbuffer *ringbuf);
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+void intel_ringbuffer_free(struct intel_ringbuffer *ring);
 
 void intel_stop_ring_buffer(struct intel_engine_cs *ring);
 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
index 7401cf90b0dbcd1eb335e0c6defc42d22c9bb631..d194492263eb861551c3289c5b51305c26ebd9ed 100644 (file)
@@ -464,14 +464,14 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
        bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
                                        SKL_DISP_PW_2);
 
-       WARN(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
-       WARN(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
-       WARN(pg2_enabled, "PG2 not disabled to enable DC5.\n");
+       WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
+       WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
+       WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
 
-       WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
-                               "DC5 already programmed to be enabled.\n");
-       WARN(dev_priv->pm.suspended,
-               "DC5 cannot be enabled, if platform is runtime-suspended.\n");
+       WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
+                 "DC5 already programmed to be enabled.\n");
+       WARN_ONCE(dev_priv->pm.suspended,
+                 "DC5 cannot be enabled, if platform is runtime-suspended.\n");
 
        assert_csr_loaded(dev_priv);
 }
@@ -487,8 +487,8 @@ static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
        if (dev_priv->power_domains.initializing)
                return;
 
-       WARN(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
-       WARN(dev_priv->pm.suspended,
+       WARN_ONCE(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
+       WARN_ONCE(dev_priv->pm.suspended,
                "Disabling of DC5 while platform is runtime-suspended should never happen.\n");
 }
 
@@ -527,12 +527,12 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = dev_priv->dev;
 
-       WARN(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
-       WARN(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
-       WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
-               "Backlight is not disabled.\n");
-       WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
-               "DC6 already programmed to be enabled.\n");
+       WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
+       WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
+       WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+                 "Backlight is not disabled.\n");
+       WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
+                 "DC6 already programmed to be enabled.\n");
 
        assert_csr_loaded(dev_priv);
 }
@@ -547,8 +547,8 @@ static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
                return;
 
        assert_csr_loaded(dev_priv);
-       WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
-               "DC6 already programmed to be disabled.\n");
+       WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
+                 "DC6 already programmed to be disabled.\n");
 }
 
 static void skl_enable_dc6(struct drm_i915_private *dev_priv)
@@ -671,7 +671,7 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
                                wait_for((state = intel_csr_load_status_get(dev_priv)) !=
                                                FW_UNINITIALIZED, 1000);
                                if (state != FW_LOADED)
-                                       DRM_ERROR("CSR firmware not ready (%d)\n",
+                                       DRM_DEBUG("CSR firmware not ready (%d)\n",
                                                        state);
                                else
                                        if (SKL_ENABLE_DC6(dev))
@@ -856,6 +856,25 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
 
 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
 {
+       enum pipe pipe;
+
+       /*
+        * Enable the CRI clock source so we can get at the
+        * display and the reference clock for VGA
+        * hotplug / manual detection. Supposedly DSI also
+        * needs the ref clock up and running.
+        *
+        * CHV DPLL B/C have some issues if VGA mode is enabled.
+        */
+       for_each_pipe(dev_priv->dev, pipe) {
+               u32 val = I915_READ(DPLL(pipe));
+
+               val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+               if (pipe != PIPE_A)
+                       val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+               I915_WRITE(DPLL(pipe), val);
+       }
 
        spin_lock_irq(&dev_priv->irq_lock);
        valleyview_enable_display_irqs(dev_priv);
@@ -907,13 +926,7 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
 {
        WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
 
-       /*
-        * Enable the CRI clock source so we can get at the
-        * display and the reference clock for VGA
-        * hotplug / manual detection.
-        */
-       I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
-                  DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+       /* since ref/cri clock was enabled */
        udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
 
        vlv_set_power_well(dev_priv, power_well, true);
@@ -948,30 +961,126 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
        vlv_set_power_well(dev_priv, power_well, false);
 }
 
+#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+
+static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
+                                                int power_well_id)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *power_well;
+       int i;
+
+       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+               if (power_well->data == power_well_id)
+                       return power_well;
+       }
+
+       return NULL;
+}
+
+#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
+
+static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_well *cmn_bc =
+               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+       struct i915_power_well *cmn_d =
+               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
+       u32 phy_control = dev_priv->chv_phy_control;
+       u32 phy_status = 0;
+       u32 tmp;
+
+       if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
+               phy_status |= PHY_POWERGOOD(DPIO_PHY0);
+
+               /* this assumes override is only used to enable lanes */
+               if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
+                       phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
+
+               if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
+                       phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
+
+               /* CL1 is on whenever anything is on in either channel */
+               if (BITS_SET(phy_control,
+                            PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
+                            PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
+                       phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
+
+               /*
+                * The DPLLB check accounts for the pipe B + port A usage
+                * with CL2 powered up but all the lanes in the second channel
+                * powered down.
+                */
+               if (BITS_SET(phy_control,
+                            PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
+                   (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
+                       phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
+
+               if (BITS_SET(phy_control,
+                            PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
+                       phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
+               if (BITS_SET(phy_control,
+                            PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
+                       phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
+
+               if (BITS_SET(phy_control,
+                            PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
+                       phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
+               if (BITS_SET(phy_control,
+                            PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
+                       phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
+       }
+
+       if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
+               phy_status |= PHY_POWERGOOD(DPIO_PHY1);
+
+               /* this assumes override is only used to enable lanes */
+               if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
+                       phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
+
+               if (BITS_SET(phy_control,
+                            PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
+                       phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
+
+               if (BITS_SET(phy_control,
+                            PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
+                       phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
+               if (BITS_SET(phy_control,
+                            PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
+                       phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
+       }
+
+       /*
+        * The PHY may be busy with some initial calibration and whatnot,
+        * so the power state can take a while to actually change.
+        */
+       if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS)) == phy_status, 10))
+               WARN(phy_status != tmp,
+                    "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
+                    tmp, phy_status, dev_priv->chv_phy_control);
+}
+
+#undef BITS_SET
+
 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
 {
        enum dpio_phy phy;
+       enum pipe pipe;
+       uint32_t tmp;
 
        WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
                     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
 
-       /*
-        * Enable the CRI clock source so we can get at the
-        * display and the reference clock for VGA
-        * hotplug / manual detection.
-        */
        if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+               pipe = PIPE_A;
                phy = DPIO_PHY0;
-               I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
-                          DPLL_REF_CLK_ENABLE_VLV);
-               I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
-                          DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
        } else {
+               pipe = PIPE_C;
                phy = DPIO_PHY1;
-               I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) | DPLL_VGA_MODE_DIS |
-                          DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
        }
+
+       /* since ref/cri clock was enabled */
        udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
        vlv_set_power_well(dev_priv, power_well, true);
 
@@ -979,8 +1088,38 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
        if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
                DRM_ERROR("Display PHY %d is not power up\n", phy);
 
+       mutex_lock(&dev_priv->sb_lock);
+
+       /* Enable dynamic power down */
+       tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
+       tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
+               DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
+       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
+
+       if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+               tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
+               tmp |= DPIO_DYNPWRDOWNEN_CH1;
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
+       } else {
+               /*
+                * Force the non-existing CL2 off. BXT does this
+                * too, so maybe it saves some power even though
+                * CL2 doesn't exist?
+                */
+               tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+               tmp |= DPIO_CL2_LDOFUSE_PWRENB;
+               vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
+       }
+
+       mutex_unlock(&dev_priv->sb_lock);
+
        dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+       DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+                     phy, dev_priv->chv_phy_control);
+
+       assert_chv_phy_status(dev_priv);
 }
 
 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1004,6 +1143,124 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
        I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
 
        vlv_set_power_well(dev_priv, power_well, false);
+
+       DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+                     phy, dev_priv->chv_phy_control);
+
+       assert_chv_phy_status(dev_priv);
+}
+
+static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+                                    enum dpio_channel ch, bool override, unsigned int mask)
+{
+       enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
+       u32 reg, val, expected, actual;
+
+       if (ch == DPIO_CH0)
+               reg = _CHV_CMN_DW0_CH0;
+       else
+               reg = _CHV_CMN_DW6_CH1;
+
+       mutex_lock(&dev_priv->sb_lock);
+       val = vlv_dpio_read(dev_priv, pipe, reg);
+       mutex_unlock(&dev_priv->sb_lock);
+
+       /*
+        * This assumes !override is only used when the port is disabled.
+        * All lanes should power down even without the override when
+        * the port is disabled.
+        */
+       if (!override || mask == 0xf) {
+               expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+               /*
+                * If CH1 common lane is not active anymore
+                * (eg. for pipe B DPLL) the entire channel will
+                * shut down, which causes the common lane registers
+                * to read as 0. That means we can't actually check
+                * the lane power down status bits, but as the entire
+                * register reads as 0 it's a good indication that the
+                * channel is indeed entirely powered down.
+                */
+               if (ch == DPIO_CH1 && val == 0)
+                       expected = 0;
+       } else if (mask != 0x0) {
+               expected = DPIO_ANYDL_POWERDOWN;
+       } else {
+               expected = 0;
+       }
+
+       if (ch == DPIO_CH0)
+               actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
+       else
+               actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
+       actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+
+       WARN(actual != expected,
+            "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
+            !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
+            !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
+            reg, val);
+}
+
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+                         enum dpio_channel ch, bool override)
+{
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       bool was_override;
+
+       mutex_lock(&power_domains->lock);
+
+       was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+       if (override == was_override)
+               goto out;
+
+       if (override)
+               dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+       else
+               dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+       I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+       DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
+                     phy, ch, dev_priv->chv_phy_control);
+
+       assert_chv_phy_status(dev_priv);
+
+out:
+       mutex_unlock(&power_domains->lock);
+
+       return was_override;
+}
+
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+                            bool override, unsigned int mask)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
+       enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+
+       mutex_lock(&power_domains->lock);
+
+       dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
+       dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
+
+       if (override)
+               dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+       else
+               dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+       I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+       DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
+                     phy, ch, mask, dev_priv->chv_phy_control);
+
+       assert_chv_phy_status(dev_priv);
+
+       assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
+
+       mutex_unlock(&power_domains->lock);
 }
 
 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
@@ -1167,8 +1424,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
        intel_runtime_pm_put(dev_priv);
 }
 
-#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
-
 #define HSW_ALWAYS_ON_POWER_DOMAINS (                  \
        BIT(POWER_DOMAIN_PIPE_A) |                      \
        BIT(POWER_DOMAIN_TRANSCODER_EDP) |              \
@@ -1430,21 +1685,6 @@ static struct i915_power_well chv_power_wells[] = {
        },
 };
 
-static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
-                                                int power_well_id)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *power_well;
-       int i;
-
-       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
-               if (power_well->data == power_well_id)
-                       return power_well;
-       }
-
-       return NULL;
-}
-
 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
                                    int power_well_id)
 {
@@ -1630,19 +1870,72 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
         * DISPLAY_PHY_CONTROL can get corrupted if read. As a
         * workaround never ever read DISPLAY_PHY_CONTROL, and
         * instead maintain a shadow copy ourselves. Use the actual
-        * power well state to reconstruct the expected initial
-        * value.
+        * power well state and lane status to reconstruct the
+        * expected initial value.
         */
        dev_priv->chv_phy_control =
                PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
                PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
-               PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH0) |
-               PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH1) |
-               PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY1, DPIO_CH0);
-       if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc))
+               PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
+               PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
+               PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
+
+       /*
+        * If all lanes are disabled we leave the override disabled
+        * with all power down bits cleared to match the state we
+        * would use after disabling the port. Otherwise enable the
+        * override and set the lane powerdown bits accding to the
+        * current lane status.
+        */
+       if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
+               uint32_t status = I915_READ(DPLL(PIPE_A));
+               unsigned int mask;
+
+               mask = status & DPLL_PORTB_READY_MASK;
+               if (mask == 0xf)
+                       mask = 0x0;
+               else
+                       dev_priv->chv_phy_control |=
+                               PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
+
+               dev_priv->chv_phy_control |=
+                       PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
+
+               mask = (status & DPLL_PORTC_READY_MASK) >> 4;
+               if (mask == 0xf)
+                       mask = 0x0;
+               else
+                       dev_priv->chv_phy_control |=
+                               PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
+
+               dev_priv->chv_phy_control |=
+                       PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
+
                dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
-       if (cmn_d->ops->is_enabled(dev_priv, cmn_d))
+       }
+
+       if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
+               uint32_t status = I915_READ(DPIO_PHY_STATUS);
+               unsigned int mask;
+
+               mask = status & DPLL_PORTD_READY_MASK;
+
+               if (mask == 0xf)
+                       mask = 0x0;
+               else
+                       dev_priv->chv_phy_control |=
+                               PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
+
+               dev_priv->chv_phy_control |=
+                       PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
+
                dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
+       }
+
+       I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+       DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
+                     dev_priv->chv_phy_control);
 }
 
 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
@@ -1688,7 +1981,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
        power_domains->initializing = true;
 
        if (IS_CHERRYVIEW(dev)) {
+               mutex_lock(&power_domains->lock);
                chv_phy_control_init(dev_priv);
+               mutex_unlock(&power_domains->lock);
        } else if (IS_VALLEYVIEW(dev)) {
                mutex_lock(&power_domains->lock);
                vlv_cmnlane_wa(dev_priv);
index c98098e884ccef64e7d722a70063bd97b404a7cc..05521b5c6878aed4ba55cb781f44edf40cb5c208 100644 (file)
@@ -53,7 +53,7 @@
 #define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
 
 
-static const char *tv_format_names[] = {
+static const char * const tv_format_names[] = {
        "NTSC_M"   , "NTSC_J"  , "NTSC_443",
        "PAL_B"    , "PAL_D"   , "PAL_G"   ,
        "PAL_H"    , "PAL_I"   , "PAL_M"   ,
@@ -63,7 +63,7 @@ static const char *tv_format_names[] = {
        "SECAM_60"
 };
 
-#define TV_FORMAT_NUM  (sizeof(tv_format_names) / sizeof(*tv_format_names))
+#define TV_FORMAT_NUM  ARRAY_SIZE(tv_format_names)
 
 struct intel_sdvo {
        struct intel_encoder base;
@@ -452,7 +452,7 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
        DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
 }
 
-static const char *cmd_status_names[] = {
+static const char * const cmd_status_names[] = {
        "Power on",
        "Success",
        "Not supported",
@@ -2222,7 +2222,7 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
  */
 static void
 intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
-                         struct intel_sdvo *sdvo, u32 reg)
+                         struct intel_sdvo *sdvo)
 {
        struct sdvo_device_mapping *mapping;
 
@@ -2239,7 +2239,7 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
 
 static void
 intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
-                         struct intel_sdvo *sdvo, u32 reg)
+                         struct intel_sdvo *sdvo)
 {
        struct sdvo_device_mapping *mapping;
        u8 pin;
@@ -2925,7 +2925,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
        intel_sdvo->sdvo_reg = sdvo_reg;
        intel_sdvo->is_sdvob = is_sdvob;
        intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
-       intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
+       intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo);
        if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
                goto err_i2c_bus;
 
@@ -2987,7 +2987,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
         */
        intel_sdvo->base.cloneable = 0;
 
-       intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
+       intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo);
 
        /* Set the input timing to the screen. Assume always input 0. */
        if (!intel_sdvo_set_target_input(intel_sdvo))
index 9d8af2f8a87596caf23ddc65ae7e13025d9c5f9a..4349fde4b72cc20387a9348c7d3177571c45fce0 100644 (file)
@@ -76,7 +76,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
  * avoid random delays. The value written to @start_vbl_count should be
  * supplied to intel_pipe_update_end() for error checking.
  */
-void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
+void intel_pipe_update_start(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
        const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
@@ -95,7 +95,6 @@ void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
        max = vblank_start - 1;
 
        local_irq_disable();
-       *start_vbl_count = 0;
 
        if (min <= 0 || max <= 0)
                return;
@@ -103,7 +102,9 @@ void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
        if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
                return;
 
-       trace_i915_pipe_update_start(crtc, min, max);
+       crtc->debug.min_vbl = min;
+       crtc->debug.max_vbl = max;
+       trace_i915_pipe_update_start(crtc);
 
        for (;;) {
                /*
@@ -134,9 +135,12 @@ void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
 
        drm_crtc_vblank_put(&crtc->base);
 
-       *start_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
+       crtc->debug.scanline_start = scanline;
+       crtc->debug.start_vbl_time = ktime_get();
+       crtc->debug.start_vbl_count =
+               dev->driver->get_vblank_counter(dev, pipe);
 
-       trace_i915_pipe_update_vblank_evaded(crtc, min, max, *start_vbl_count);
+       trace_i915_pipe_update_vblank_evaded(crtc);
 }
 
 /**
@@ -148,19 +152,27 @@ void intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
  * re-enables interrupts and verifies the update was actually completed
  * before a vblank using the value of @start_vbl_count.
  */
-void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
+void intel_pipe_update_end(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
        enum pipe pipe = crtc->pipe;
+       int scanline_end = intel_get_crtc_scanline(crtc);
        u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
+       ktime_t end_vbl_time = ktime_get();
 
-       trace_i915_pipe_update_end(crtc, end_vbl_count);
+       trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
 
        local_irq_enable();
 
-       if (start_vbl_count && start_vbl_count != end_vbl_count)
-               DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u)\n",
-                         pipe_name(pipe), start_vbl_count, end_vbl_count);
+       if (crtc->debug.start_vbl_count &&
+           crtc->debug.start_vbl_count != end_vbl_count) {
+               DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
+                         pipe_name(pipe), crtc->debug.start_vbl_count,
+                         end_vbl_count,
+                         ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
+                         crtc->debug.min_vbl, crtc->debug.max_vbl,
+                         crtc->debug.scanline_start, scanline_end);
+       }
 }
 
 static void
@@ -223,12 +235,12 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
        else if (key->flags & I915_SET_COLORKEY_SOURCE)
                plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
 
-       surf_addr = intel_plane_obj_offset(intel_plane, obj);
+       surf_addr = intel_plane_obj_offset(intel_plane, obj, 0);
 
        if (intel_rotation_90_or_270(rotation)) {
                /* stride: Surface height in tiles */
                tile_height = intel_tile_height(dev, fb->pixel_format,
-                                               fb->modifier[0]);
+                                               fb->modifier[0], 0);
                stride = DIV_ROUND_UP(fb->height, tile_height);
                plane_size = (src_w << 16) | src_h;
                x_offset = stride * tile_height - y - (src_h + 1);
@@ -923,8 +935,6 @@ intel_commit_sprite_plane(struct drm_plane *plane,
 
        crtc = crtc ? crtc : plane->crtc;
 
-       plane->fb = fb;
-
        if (!crtc->state->active)
                return;
 
@@ -1121,7 +1131,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 
        intel_plane->pipe = pipe;
        intel_plane->plane = plane;
-       intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe);
+       intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
        intel_plane->check_plane = intel_check_sprite_plane;
        intel_plane->commit_plane = intel_commit_sprite_plane;
        possible_crtcs = (1 << pipe);
index 0568ae6ec9dd2c945b0395a14305c281daf18be2..6bea78944cd680056c0c9006937e4b4d1f61e4f8 100644 (file)
@@ -1138,13 +1138,13 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder)
 
        j = 0;
        for (i = 0; i < 60; i++)
-               I915_WRITE(TV_H_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
+               I915_WRITE(TV_H_LUMA(i), tv_mode->filter_table[j++]);
        for (i = 0; i < 60; i++)
-               I915_WRITE(TV_H_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
+               I915_WRITE(TV_H_CHROMA(i), tv_mode->filter_table[j++]);
        for (i = 0; i < 43; i++)
-               I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
+               I915_WRITE(TV_V_LUMA(i), tv_mode->filter_table[j++]);
        for (i = 0; i < 43; i++)
-               I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
+               I915_WRITE(TV_V_CHROMA(i), tv_mode->filter_table[j++]);
        I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
        I915_WRITE(TV_CTL, tv_ctl);
 }
@@ -1291,7 +1291,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
                return;
 
 
-       for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) {
+       for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
                tv_mode = tv_modes + i;
 
                if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
@@ -1579,7 +1579,7 @@ intel_tv_init(struct drm_device *dev)
        struct intel_encoder *intel_encoder;
        struct intel_connector *intel_connector;
        u32 tv_dac_on, tv_dac_off, save_tv_dac;
-       char *tv_format_names[ARRAY_SIZE(tv_modes)];
+       const char *tv_format_names[ARRAY_SIZE(tv_modes)];
        int i, initial_mode = 0;
 
        if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
@@ -1677,7 +1677,7 @@ intel_tv_init(struct drm_device *dev)
 
        /* Create TV properties then attach current values */
        for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
-               tv_format_names[i] = (char *)tv_modes[i].name;
+               tv_format_names[i] = tv_modes[i].name;
        drm_mode_create_tv_properties(dev,
                                      ARRAY_SIZE(tv_modes),
                                      tv_format_names);
index 9d3c2e420d2b68611d52e9c6395d35721da19447..14d0831c6156c8d6990954cac907a2435c81726b 100644 (file)
@@ -27,7 +27,7 @@
 
 #include <linux/pm_runtime.h>
 
-#define FORCEWAKE_ACK_TIMEOUT_MS 2
+#define FORCEWAKE_ACK_TIMEOUT_MS 50
 
 #define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
 #define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
@@ -52,8 +52,7 @@ static const char * const forcewake_domain_names[] = {
 const char *
 intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
 {
-       BUILD_BUG_ON((sizeof(forcewake_domain_names)/sizeof(const char *)) !=
-                    FW_DOMAIN_ID_COUNT);
+       BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
 
        if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
                return forcewake_domain_names[id];
@@ -770,6 +769,7 @@ static u##x \
 gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_READ_HEADER(x); \
+       hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
        if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)))   \
                fw_engine = 0; \
        else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg))       \
@@ -783,6 +783,7 @@ gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
        if (fw_engine) \
                __force_wake_get(dev_priv, fw_engine); \
        val = __raw_i915_read##x(dev_priv, reg); \
+       hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
        GEN6_READ_FOOTER; \
 }
 
@@ -983,6 +984,7 @@ gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
                bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_WRITE_HEADER; \
+       hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
        if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
            is_gen9_shadowed(dev_priv, reg)) \
                fw_engine = 0; \
@@ -997,6 +999,8 @@ gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
        if (fw_engine) \
                __force_wake_get(dev_priv, fw_engine); \
        __raw_i915_write##x(dev_priv, reg, val); \
+       hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
+       hsw_unclaimed_reg_detect(dev_priv); \
        GEN6_WRITE_FOOTER; \
 }
 
@@ -1198,8 +1202,6 @@ void intel_uncore_init(struct drm_device *dev)
 
        switch (INTEL_INFO(dev)->gen) {
        default:
-               MISSING_CASE(INTEL_INFO(dev)->gen);
-               return;
        case 9:
                ASSIGN_WRITE_MMIO_VFUNCS(gen9);
                ASSIGN_READ_MMIO_VFUNCS(gen9);
index 74f505b0dd0280d47fcd9268e2b4d95a1ddae250..de00a6c31ab69e7a41584bfd2e155cda6d6a52ce 100644 (file)
@@ -145,10 +145,10 @@ void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc)
 }
 EXPORT_SYMBOL_GPL(imx_drm_handle_vblank);
 
-static int imx_drm_enable_vblank(struct drm_device *drm, int crtc)
+static int imx_drm_enable_vblank(struct drm_device *drm, unsigned int pipe)
 {
        struct imx_drm_device *imxdrm = drm->dev_private;
-       struct imx_drm_crtc *imx_drm_crtc = imxdrm->crtc[crtc];
+       struct imx_drm_crtc *imx_drm_crtc = imxdrm->crtc[pipe];
        int ret;
 
        if (!imx_drm_crtc)
@@ -163,10 +163,10 @@ static int imx_drm_enable_vblank(struct drm_device *drm, int crtc)
        return ret;
 }
 
-static void imx_drm_disable_vblank(struct drm_device *drm, int crtc)
+static void imx_drm_disable_vblank(struct drm_device *drm, unsigned int pipe)
 {
        struct imx_drm_device *imxdrm = drm->dev_private;
-       struct imx_drm_crtc *imx_drm_crtc = imxdrm->crtc[crtc];
+       struct imx_drm_crtc *imx_drm_crtc = imxdrm->crtc[pipe];
 
        if (!imx_drm_crtc)
                return;
@@ -487,7 +487,7 @@ static struct drm_driver imx_drm_driver = {
        .gem_prime_vmap         = drm_gem_cma_prime_vmap,
        .gem_prime_vunmap       = drm_gem_cma_prime_vunmap,
        .gem_prime_mmap         = drm_gem_cma_prime_mmap,
-       .get_vblank_counter     = drm_vblank_count,
+       .get_vblank_counter     = drm_vblank_no_hw_counter,
        .enable_vblank          = imx_drm_enable_vblank,
        .disable_vblank         = imx_drm_disable_vblank,
        .ioctls                 = imx_drm_ioctls,
index 8cfa9cb74c8679a329ff14293f0e1fd4b2927323..1f2f9ca259010184ee6e949facd44df1e964f9f0 100644 (file)
@@ -416,7 +416,7 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
        return 0;
 }
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
 /**
  * Bootstrap the driver for AGP DMA.
  *
@@ -947,7 +947,7 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
                        drm_legacy_ioremapfree(dev->agp_buffer_map, dev);
 
                if (dev_priv->used_new_dma_init) {
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
                        if (dev_priv->agp_handle != 0) {
                                struct drm_agp_binding unbind_req;
                                struct drm_agp_buffer free_req;
index b4a2014917e525109eaccdd5cd2237e6eb82915d..bb312339e0b00cbe8b2c45e5d32c2e5a7e1337d1 100644 (file)
@@ -183,9 +183,9 @@ extern int mga_warp_install_microcode(drm_mga_private_t *dev_priv);
 extern int mga_warp_init(drm_mga_private_t *dev_priv);
 
                                /* mga_irq.c */
-extern int mga_enable_vblank(struct drm_device *dev, int crtc);
-extern void mga_disable_vblank(struct drm_device *dev, int crtc);
-extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
+extern int mga_enable_vblank(struct drm_device *dev, unsigned int pipe);
+extern void mga_disable_vblank(struct drm_device *dev, unsigned int pipe);
+extern u32 mga_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
 extern int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence);
 extern int mga_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
 extern irqreturn_t mga_driver_irq_handler(int irq, void *arg);
index 1b071b8ff9dccec81e1d93376d0b6b09bc58521b..693ba708cfed8c8389d4149a8717377b97216b7f 100644 (file)
 #include <drm/mga_drm.h>
 #include "mga_drv.h"
 
-u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
+u32 mga_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 {
        const drm_mga_private_t *const dev_priv =
                (drm_mga_private_t *) dev->dev_private;
 
-       if (crtc != 0)
+       if (pipe != 0)
                return 0;
 
        return atomic_read(&dev_priv->vbl_received);
@@ -88,13 +88,13 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
        return IRQ_NONE;
 }
 
-int mga_enable_vblank(struct drm_device *dev, int crtc)
+int mga_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
 
-       if (crtc != 0) {
-               DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
-                         crtc);
+       if (pipe != 0) {
+               DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
+                         pipe);
                return 0;
        }
 
@@ -103,11 +103,11 @@ int mga_enable_vblank(struct drm_device *dev, int crtc)
 }
 
 
-void mga_disable_vblank(struct drm_device *dev, int crtc)
+void mga_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       if (crtc != 0) {
-               DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
-                         crtc);
+       if (pipe != 0) {
+               DRM_ERROR("tried to disable vblank on non-existent crtc %u\n",
+                         pipe);
        }
 
        /* Do *NOT* disable the vertical refresh interrupt.  MGA doesn't have
index e9dee367b597e0761f874ce62fde57ad2f8b90fb..30d57e74c42f6f4e07579f8d7fdbdf856263ec78 100644 (file)
@@ -99,22 +99,28 @@ static const struct drm_plane_funcs mdp4_plane_funcs = {
 };
 
 static int mdp4_plane_prepare_fb(struct drm_plane *plane,
-               struct drm_framebuffer *fb,
                const struct drm_plane_state *new_state)
 {
        struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
        struct mdp4_kms *mdp4_kms = get_kms(plane);
+       struct drm_framebuffer *fb = new_state->fb;
+
+       if (!fb)
+               return 0;
 
        DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
        return msm_framebuffer_prepare(fb, mdp4_kms->id);
 }
 
 static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
-               struct drm_framebuffer *fb,
                const struct drm_plane_state *old_state)
 {
        struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
        struct mdp4_kms *mdp4_kms = get_kms(plane);
+       struct drm_framebuffer *fb = old_state->fb;
+
+       if (!fb)
+               return;
 
        DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
        msm_framebuffer_cleanup(fb, mdp4_kms->id);
index 07fb62fea6dc1d142928a2838134ca30c88fdfac..a0f5ff0ce8dcc4721c25f99587b2f36ca333c25e 100644 (file)
@@ -250,22 +250,28 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
 };
 
 static int mdp5_plane_prepare_fb(struct drm_plane *plane,
-               struct drm_framebuffer *fb,
                const struct drm_plane_state *new_state)
 {
        struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
        struct mdp5_kms *mdp5_kms = get_kms(plane);
+       struct drm_framebuffer *fb = new_state->fb;
+
+       if (!new_state->fb)
+               return 0;
 
        DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
        return msm_framebuffer_prepare(fb, mdp5_kms->id);
 }
 
 static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
-               struct drm_framebuffer *fb,
                const struct drm_plane_state *old_state)
 {
        struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
        struct mdp5_kms *mdp5_kms = get_kms(plane);
+       struct drm_framebuffer *fb = old_state->fb;
+
+       if (!fb)
+               return;
 
        DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
        msm_framebuffer_cleanup(fb, mdp5_kms->id);
index 1ceb4f22dd8997a7b4e772d82e646abb1a87c7ff..7eb253bc24df3156b75f6409f6b9a9df184b66ae 100644 (file)
@@ -125,7 +125,7 @@ static void complete_commit(struct msm_commit *c)
 
        drm_atomic_helper_commit_modeset_disables(dev, state);
 
-       drm_atomic_helper_commit_planes(dev, state);
+       drm_atomic_helper_commit_planes(dev, state, false);
 
        drm_atomic_helper_commit_modeset_enables(dev, state);
 
index 0339c5d82d373b3038dca40ffc15a6ffcd58af55..a06ec71e109da96742827803643f87f7b031b7ac 100644 (file)
@@ -531,24 +531,24 @@ static void msm_irq_uninstall(struct drm_device *dev)
        kms->funcs->irq_uninstall(kms);
 }
 
-static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
+static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        if (!kms)
                return -ENXIO;
-       DBG("dev=%p, crtc=%d", dev, crtc_id);
-       return vblank_ctrl_queue_work(priv, crtc_id, true);
+       DBG("dev=%p, crtc=%u", dev, pipe);
+       return vblank_ctrl_queue_work(priv, pipe, true);
 }
 
-static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
+static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        if (!kms)
                return;
-       DBG("dev=%p, crtc=%d", dev, crtc_id);
-       vblank_ctrl_queue_work(priv, crtc_id, false);
+       DBG("dev=%p, crtc=%u", dev, pipe);
+       vblank_ctrl_queue_work(priv, pipe, false);
 }
 
 /*
@@ -978,7 +978,7 @@ static struct drm_driver msm_driver = {
        .irq_preinstall     = msm_irq_preinstall,
        .irq_postinstall    = msm_irq_postinstall,
        .irq_uninstall      = msm_irq_uninstall,
-       .get_vblank_counter = drm_vblank_count,
+       .get_vblank_counter = drm_vblank_no_hw_counter,
        .enable_vblank      = msm_enable_vblank,
        .disable_vblank     = msm_disable_vblank,
        .gem_free_object    = msm_gem_free_object,
index 08c6f5e5061095f1d62f73676c7118c0e1ad4553..903c473d266ff5d32d71059abb6406bface455f0 100644 (file)
@@ -32,7 +32,7 @@
 #include "hw.h"
 #include "tvnv17.h"
 
-char *nv17_tv_norm_names[NUM_TV_NORMS] = {
+const char * const nv17_tv_norm_names[NUM_TV_NORMS] = {
        [TV_NORM_PAL] = "PAL",
        [TV_NORM_PAL_M] = "PAL-M",
        [TV_NORM_PAL_N] = "PAL-N",
index 459910b6bb3206980b4995e7685d657a5cd32d02..1b07521cde0de37ccfedd6114d66b7ad659770ee 100644 (file)
@@ -85,7 +85,7 @@ struct nv17_tv_encoder {
 #define to_tv_enc(x) container_of(nouveau_encoder(x),          \
                                  struct nv17_tv_encoder, base)
 
-extern char *nv17_tv_norm_names[NUM_TV_NORMS];
+extern const char * const nv17_tv_norm_names[NUM_TV_NORMS];
 
 extern struct nv17_tv_norm_params {
        enum {
index 15057b39491ca697ccc1a8a093599f74f5ace531..78f520d05de92ea627980c5e8f2bb8388715e6fe 100644 (file)
@@ -574,7 +574,7 @@ static struct ttm_tt *
 nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
                      uint32_t page_flags, struct page *dummy_read)
 {
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        struct nouveau_drm *drm = nouveau_bdev(bdev);
 
        if (drm->agp.bridge) {
@@ -1366,7 +1366,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
                /* System memory */
                return 0;
        case TTM_PL_TT:
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
                if (drm->agp.bridge) {
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = drm->agp.base;
@@ -1496,7 +1496,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
            ttm->caching_state == tt_uncached)
                return ttm_dma_populate(ttm_dma, dev->dev);
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (drm->agp.bridge) {
                return ttm_agp_tt_populate(ttm);
        }
@@ -1563,7 +1563,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
                return;
        }
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (drm->agp.bridge) {
                ttm_agp_tt_unpopulate(ttm);
                return;
index cc6c228e11c83566d1ac1a2c59fcefa959345463..614b32e6381cb93c11e19068cded923185bad847 100644 (file)
@@ -51,12 +51,12 @@ nouveau_display_vblank_handler(struct nvif_notify *notify)
 }
 
 int
-nouveau_display_vblank_enable(struct drm_device *dev, int head)
+nouveau_display_vblank_enable(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_crtc *crtc;
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-               if (nv_crtc->index == head) {
+               if (nv_crtc->index == pipe) {
                        nvif_notify_get(&nv_crtc->vblank);
                        return 0;
                }
@@ -65,12 +65,12 @@ nouveau_display_vblank_enable(struct drm_device *dev, int head)
 }
 
 void
-nouveau_display_vblank_disable(struct drm_device *dev, int head)
+nouveau_display_vblank_disable(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_crtc *crtc;
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-               if (nv_crtc->index == head) {
+               if (nv_crtc->index == pipe) {
                        nvif_notify_put(&nv_crtc->vblank);
                        return;
                }
@@ -103,6 +103,7 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
                .base.head = nouveau_crtc(crtc)->index,
        };
        struct nouveau_display *disp = nouveau_display(crtc->dev);
+       struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
        int ret, retry = 1;
 
        do {
@@ -116,7 +117,7 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
                        break;
                }
 
-               if (retry) ndelay(crtc->linedur_ns);
+               if (retry) ndelay(vblank->linedur_ns);
        } while (retry--);
 
        *hpos = args.scan.hline;
@@ -131,13 +132,15 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
 }
 
 int
-nouveau_display_scanoutpos(struct drm_device *dev, int head, unsigned int flags,
-                          int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
+nouveau_display_scanoutpos(struct drm_device *dev, unsigned int pipe,
+                          unsigned int flags, int *vpos, int *hpos,
+                          ktime_t *stime, ktime_t *etime,
+                          const struct drm_display_mode *mode)
 {
        struct drm_crtc *crtc;
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               if (nouveau_crtc(crtc)->index == head) {
+               if (nouveau_crtc(crtc)->index == pipe) {
                        return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
                                                               stime, etime);
                }
@@ -147,15 +150,15 @@ nouveau_display_scanoutpos(struct drm_device *dev, int head, unsigned int flags,
 }
 
 int
-nouveau_display_vblstamp(struct drm_device *dev, int head, int *max_error,
-                        struct timeval *time, unsigned flags)
+nouveau_display_vblstamp(struct drm_device *dev, unsigned int pipe,
+                        int *max_error, struct timeval *time, unsigned flags)
 {
        struct drm_crtc *crtc;
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               if (nouveau_crtc(crtc)->index == head) {
+               if (nouveau_crtc(crtc)->index == pipe) {
                        return drm_calc_vbltimestamp_from_scanoutpos(dev,
-                                       head, max_error, time, flags, crtc,
+                                       pipe, max_error, time, flags,
                                        &crtc->hwmode);
                }
        }
@@ -469,9 +472,13 @@ nouveau_display_create(struct drm_device *dev)
        if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
                dev->mode_config.max_width = 4096;
                dev->mode_config.max_height = 4096;
-       } else {
+       } else
+       if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) {
                dev->mode_config.max_width = 8192;
                dev->mode_config.max_height = 8192;
+       } else {
+               dev->mode_config.max_width = 16384;
+               dev->mode_config.max_height = 16384;
        }
 
        dev->mode_config.preferred_depth = 24;
index a6213e2425c597cca0168e12188476a1e8baad31..856abe0f070d0335b198d57b745c2fa247a5e7c6 100644 (file)
@@ -65,11 +65,12 @@ int  nouveau_display_init(struct drm_device *dev);
 void nouveau_display_fini(struct drm_device *dev);
 int  nouveau_display_suspend(struct drm_device *dev, bool runtime);
 void nouveau_display_resume(struct drm_device *dev, bool runtime);
-int  nouveau_display_vblank_enable(struct drm_device *, int);
-void nouveau_display_vblank_disable(struct drm_device *, int);
-int  nouveau_display_scanoutpos(struct drm_device *, int, unsigned int,
-                               int *, int *, ktime_t *, ktime_t *);
-int  nouveau_display_vblstamp(struct drm_device *, int, int *,
+int  nouveau_display_vblank_enable(struct drm_device *, unsigned int);
+void nouveau_display_vblank_disable(struct drm_device *, unsigned int);
+int  nouveau_display_scanoutpos(struct drm_device *, unsigned int,
+                               unsigned int, int *, int *, ktime_t *,
+                               ktime_t *, const struct drm_display_mode *);
+int  nouveau_display_vblstamp(struct drm_device *, unsigned int, int *,
                              struct timeval *, unsigned);
 
 int  nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
index ccefb645fd55dfd4e93f3461021e3bb845a9da29..2416c7dddd5bba35e46a73be0badfe6b90d23d91 100644 (file)
@@ -934,7 +934,7 @@ driver_stub = {
        .debugfs_cleanup = nouveau_debugfs_takedown,
 #endif
 
-       .get_vblank_counter = drm_vblank_count,
+       .get_vblank_counter = drm_vblank_no_hw_counter,
        .enable_vblank = nouveau_display_vblank_enable,
        .disable_vblank = nouveau_display_vblank_disable,
        .get_scanout_position = nouveau_display_scanoutpos,
index 2791701685dc82bf4e2655ce3ea8ea6c3b278e49..59f27e774acb5e9c98c9854bd72195efdbec3a48 100644 (file)
@@ -178,8 +178,30 @@ nouveau_fbcon_sync(struct fb_info *info)
        return 0;
 }
 
+static int
+nouveau_fbcon_open(struct fb_info *info, int user)
+{
+       struct nouveau_fbdev *fbcon = info->par;
+       struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+       int ret = pm_runtime_get_sync(drm->dev->dev);
+       if (ret < 0 && ret != -EACCES)
+               return ret;
+       return 0;
+}
+
+static int
+nouveau_fbcon_release(struct fb_info *info, int user)
+{
+       struct nouveau_fbdev *fbcon = info->par;
+       struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+       pm_runtime_put(drm->dev->dev);
+       return 0;
+}
+
 static struct fb_ops nouveau_fbcon_ops = {
        .owner = THIS_MODULE,
+       .fb_open = nouveau_fbcon_open,
+       .fb_release = nouveau_fbcon_release,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par,
        .fb_fillrect = nouveau_fbcon_fillrect,
@@ -195,6 +217,8 @@ static struct fb_ops nouveau_fbcon_ops = {
 
 static struct fb_ops nouveau_fbcon_sw_ops = {
        .owner = THIS_MODULE,
+       .fb_open = nouveau_fbcon_open,
+       .fb_release = nouveau_fbcon_release,
        .fb_check_var = drm_fb_helper_check_var,
        .fb_set_par = drm_fb_helper_set_par,
        .fb_fillrect = drm_fb_helper_cfb_fillrect,
index 65af31441e9c29496647084d927c11f393974653..a7d69ce7abc1ad33b5fd283a5f4b5321feb89f18 100644 (file)
@@ -267,6 +267,12 @@ init_i2c(struct nvbios_init *init, int index)
                index = NVKM_I2C_BUS_PRI;
                if (init->outp && init->outp->i2c_upper_default)
                        index = NVKM_I2C_BUS_SEC;
+       } else
+       if (index == 0x80) {
+               index = NVKM_I2C_BUS_PRI;
+       } else
+       if (index == 0x81) {
+               index = NVKM_I2C_BUS_SEC;
        }
 
        bus = nvkm_i2c_bus_find(i2c, index);
index e0ec2a6b7b795c964e119eae2dfed644d24e4ae2..212800ecdce99e4eb1a3a23ebdab9c207cd860da 100644 (file)
@@ -8,7 +8,10 @@ struct nvbios_source {
        void *(*init)(struct nvkm_bios *, const char *);
        void  (*fini)(void *);
        u32   (*read)(void *, u32 offset, u32 length, struct nvkm_bios *);
+       u32   (*size)(void *);
        bool rw;
+       bool ignore_checksum;
+       bool no_pcir;
 };
 
 int nvbios_extend(struct nvkm_bios *, u32 length);
index 792f017525f689bb1d38b86c0bf2e746f9495c8d..b2557e87afdd6d0e95910b3b4b91e37ce9a3e269 100644 (file)
@@ -45,7 +45,7 @@ shadow_fetch(struct nvkm_bios *bios, struct shadow *mthd, u32 upto)
                u32 read = mthd->func->read(data, start, limit - start, bios);
                bios->size = start + read;
        }
-       return bios->size >= limit;
+       return bios->size >= upto;
 }
 
 static int
@@ -55,14 +55,22 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
        struct nvbios_image image;
        int score = 1;
 
-       if (!shadow_fetch(bios, mthd, offset + 0x1000)) {
-               nvkm_debug(subdev, "%08x: header fetch failed\n", offset);
-               return 0;
-       }
+       if (mthd->func->no_pcir) {
+               image.base = 0;
+               image.type = 0;
+               image.size = mthd->func->size(mthd->data);
+               image.last = 1;
+       } else {
+               if (!shadow_fetch(bios, mthd, offset + 0x1000)) {
+                       nvkm_debug(subdev, "%08x: header fetch failed\n",
+                                  offset);
+                       return 0;
+               }
 
-       if (!nvbios_image(bios, idx, &image)) {
-               nvkm_debug(subdev, "image %d invalid\n", idx);
-               return 0;
+               if (!nvbios_image(bios, idx, &image)) {
+                       nvkm_debug(subdev, "image %d invalid\n", idx);
+                       return 0;
+               }
        }
        nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
                   image.base, image.type, image.size);
@@ -74,7 +82,8 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
 
        switch (image.type) {
        case 0x00:
-               if (nvbios_checksum(&bios->data[image.base], image.size)) {
+               if (!mthd->func->ignore_checksum &&
+                   nvbios_checksum(&bios->data[image.base], image.size)) {
                        nvkm_debug(subdev, "%08x: checksum failed\n",
                                   image.base);
                        if (mthd->func->rw)
index bd60d7dd09f51a45b70f120597ca38adaf8c102b..4bf486b57101367708bba2b6fe4bdd1d985f1d19 100644 (file)
@@ -21,6 +21,7 @@
  *
  */
 #include "priv.h"
+
 #include <core/pci.h>
 
 #if defined(__powerpc__)
@@ -33,17 +34,26 @@ static u32
 of_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 {
        struct priv *priv = data;
-       if (offset + length <= priv->size) {
+       if (offset < priv->size) {
+               length = min_t(u32, length, priv->size - offset);
                memcpy_fromio(bios->data + offset, priv->data + offset, length);
                return length;
        }
        return 0;
 }
 
+static u32
+of_size(void *data)
+{
+       struct priv *priv = data;
+       return priv->size;
+}
+
 static void *
 of_init(struct nvkm_bios *bios, const char *name)
 {
-       struct pci_dev *pdev = bios->subdev.device->func->pci(bios->subdev.device)->pdev;
+       struct nvkm_device *device = bios->subdev.device;
+       struct pci_dev *pdev = device->func->pci(device)->pdev;
        struct device_node *dn;
        struct priv *priv;
        if (!(dn = pci_device_to_OF_node(pdev)))
@@ -62,7 +72,10 @@ nvbios_of = {
        .init = of_init,
        .fini = (void(*)(void *))kfree,
        .read = of_read,
+       .size = of_size,
        .rw = false,
+       .ignore_checksum = true,
+       .no_pcir = true,
 };
 #else
 const struct nvbios_source
index 814cb51cc87372bd4c18225b16b1401d10285b60..385a90f91ed6a14e394ba1e8b4743d9c38c06412 100644 (file)
@@ -35,6 +35,8 @@ static const struct nvkm_device_agp_quirk
 nvkm_device_agp_quirks[] = {
        /* VIA Apollo PRO133x / GeForce FX 5600 Ultra - fdo#20341 */
        { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
+       /* SiS 761 does not support AGP cards, use PCI mode */
+       { PCI_VENDOR_ID_SI, 0x0761, PCI_ANY_ID, PCI_ANY_ID, 0 },
        {},
 };
 
@@ -137,8 +139,10 @@ nvkm_agp_ctor(struct nvkm_pci *pci)
        while (quirk->hostbridge_vendor) {
                if (info.device->vendor == quirk->hostbridge_vendor &&
                    info.device->device == quirk->hostbridge_device &&
-                   pci->pdev->vendor == quirk->chip_vendor &&
-                   pci->pdev->device == quirk->chip_device) {
+                   (quirk->chip_vendor == (u16)PCI_ANY_ID ||
+                   pci->pdev->vendor == quirk->chip_vendor) &&
+                   (quirk->chip_device == (u16)PCI_ANY_ID ||
+                   pci->pdev->device == quirk->chip_device)) {
                        nvkm_info(subdev, "forcing default agp mode to %dX, "
                                          "use NvAGP=<mode> to override\n",
                                  quirk->mode);
index 419c2e49adf5e38439f6bb727c4d780be4b1e00a..4d5893473f7828dbbc1499106e54a3294917c6d8 100644 (file)
@@ -96,7 +96,7 @@ static void omap_atomic_complete(struct omap_atomic_state_commit *commit)
        dispc_runtime_get();
 
        drm_atomic_helper_commit_modeset_disables(dev, old_state);
-       drm_atomic_helper_commit_planes(dev, old_state);
+       drm_atomic_helper_commit_planes(dev, old_state, false);
        drm_atomic_helper_commit_modeset_enables(dev, old_state);
 
        omap_atomic_wait_for_completion(dev, old_state);
@@ -753,7 +753,7 @@ static void dev_lastclose(struct drm_device *dev)
 {
        int i;
 
-       /* we don't support vga-switcheroo.. so just make sure the fbdev
+       /* we don't support vga_switcheroo.. so just make sure the fbdev
         * mode is active
         */
        struct omap_drm_private *priv = dev->dev_private;
@@ -839,7 +839,7 @@ static struct drm_driver omap_drm_driver = {
        .preclose = dev_preclose,
        .postclose = dev_postclose,
        .set_busid = drm_platform_set_busid,
-       .get_vblank_counter = drm_vblank_count,
+       .get_vblank_counter = drm_vblank_no_hw_counter,
        .enable_vblank = omap_irq_enable_vblank,
        .disable_vblank = omap_irq_disable_vblank,
 #ifdef CONFIG_DEBUG_FS
index 12081e61d45a99b02956bda4dc5759036ef64d23..5c367aad8a6e419f39b437813e0df168e4dfbc71 100644 (file)
@@ -129,8 +129,8 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
 int omap_gem_resume(struct device *dev);
 #endif
 
-int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id);
-void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id);
+int omap_irq_enable_vblank(struct drm_device *dev, unsigned int pipe);
+void omap_irq_disable_vblank(struct drm_device *dev, unsigned int pipe);
 void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
 void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
 void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
index 51b1219af87f1b29fbbe641fa2fbfd51b5c3b92d..636a1f921569a55edcac211e2869dca9eaee7125 100644 (file)
@@ -171,7 +171,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
                uint32_t w = win->src_w;
                uint32_t h = win->src_h;
 
-               switch (win->rotation & 0xf) {
+               switch (win->rotation & DRM_ROTATE_MASK) {
                default:
                        dev_err(fb->dev->dev, "invalid rotation: %02x",
                                        (uint32_t)win->rotation);
@@ -209,7 +209,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
                info->rotation_type = OMAP_DSS_ROT_TILER;
                info->screen_width  = omap_gem_tiled_stride(plane->bo, orient);
        } else {
-               switch (win->rotation & 0xf) {
+               switch (win->rotation & DRM_ROTATE_MASK) {
                case 0:
                case BIT(DRM_ROTATE_0):
                        /* OK */
index 249c0330d6cecc6d3d39ce0c461e13e089a8a32e..60e1e8016708ec2f03fd09e8fb6e51e559b7b3a4 100644 (file)
@@ -134,7 +134,7 @@ int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
 /**
  * enable_vblank - enable vblank interrupt events
  * @dev: DRM device
- * @crtc: which irq to enable
+ * @pipe: which irq to enable
  *
  * Enable vblank interrupts for @crtc.  If the device doesn't have
  * a hardware vblank counter, this routine should be a no-op, since
@@ -144,13 +144,13 @@ int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
  * Zero on success, appropriate errno if the given @crtc's vblank
  * interrupt cannot be enabled.
  */
-int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id)
+int omap_irq_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct omap_drm_private *priv = dev->dev_private;
-       struct drm_crtc *crtc = priv->crtcs[crtc_id];
+       struct drm_crtc *crtc = priv->crtcs[pipe];
        unsigned long flags;
 
-       DBG("dev=%p, crtc=%d", dev, crtc_id);
+       DBG("dev=%p, crtc=%u", dev, pipe);
 
        spin_lock_irqsave(&list_lock, flags);
        priv->vblank_mask |= pipe2vbl(crtc);
@@ -163,19 +163,19 @@ int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id)
 /**
  * disable_vblank - disable vblank interrupt events
  * @dev: DRM device
- * @crtc: which irq to enable
+ * @pipe: which irq to enable
  *
  * Disable vblank interrupts for @crtc.  If the device doesn't have
  * a hardware vblank counter, this routine should be a no-op, since
  * interrupts will have to stay on to keep the count accurate.
  */
-void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id)
+void omap_irq_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct omap_drm_private *priv = dev->dev_private;
-       struct drm_crtc *crtc = priv->crtcs[crtc_id];
+       struct drm_crtc *crtc = priv->crtcs[pipe];
        unsigned long flags;
 
-       DBG("dev=%p, crtc=%d", dev, crtc_id);
+       DBG("dev=%p, crtc=%u", dev, pipe);
 
        spin_lock_irqsave(&list_lock, flags);
        priv->vblank_mask &= ~pipe2vbl(crtc);
index 098904696a5cada72fab1e4f221ce7621af197a5..3054bda72688dbd6b2bbfb0101c793d00fcda5c5 100644 (file)
@@ -60,17 +60,19 @@ to_omap_plane_state(struct drm_plane_state *state)
 }
 
 static int omap_plane_prepare_fb(struct drm_plane *plane,
-                                struct drm_framebuffer *fb,
                                 const struct drm_plane_state *new_state)
 {
-       return omap_framebuffer_pin(fb);
+       if (!new_state->fb)
+               return 0;
+
+       return omap_framebuffer_pin(new_state->fb);
 }
 
 static void omap_plane_cleanup_fb(struct drm_plane *plane,
-                                 struct drm_framebuffer *fb,
                                  const struct drm_plane_state *old_state)
 {
-       omap_framebuffer_unpin(fb);
+       if (old_state->fb)
+               omap_framebuffer_unpin(old_state->fb);
 }
 
 static void omap_plane_atomic_update(struct drm_plane *plane,
@@ -106,7 +108,7 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
        win.src_x = state->src_x >> 16;
        win.src_y = state->src_y >> 16;
 
-       switch (state->rotation & 0xf) {
+       switch (state->rotation & DRM_ROTATE_MASK) {
        case BIT(DRM_ROTATE_90):
        case BIT(DRM_ROTATE_270):
                win.src_w = state->src_h >> 16;
index 4649bd2ed3401ae74049798ab591c8b2e2e58779..183aea1abebc4afe5ad28f4694bc92ccc788ee8d 100644 (file)
@@ -242,6 +242,10 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
        bo->is_primary = true;
 
        ret = qxl_bo_reserve(bo, false);
+       if (ret)
+               return ret;
+       ret = qxl_bo_pin(bo, bo->type, NULL);
+       qxl_bo_unreserve(bo);
        if (ret)
                return ret;
 
@@ -257,7 +261,11 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
        }
        drm_vblank_put(dev, qcrtc->index);
 
-       qxl_bo_unreserve(bo);
+       ret = qxl_bo_reserve(bo, false);
+       if (!ret) {
+               qxl_bo_unpin(bo);
+               qxl_bo_unreserve(bo);
+       }
 
        return 0;
 }
index 83f6f0b5e9efa292e83fae0c2b9ea45d4c7241b2..7307b07fe06ba6f3aab0e60914de0a998bdbe7b0 100644 (file)
@@ -196,17 +196,18 @@ static int qxl_pm_restore(struct device *dev)
        return qxl_drm_resume(drm_dev, false);
 }
 
-static u32 qxl_noop_get_vblank_counter(struct drm_device *dev, int crtc)
+static u32 qxl_noop_get_vblank_counter(struct drm_device *dev,
+                                      unsigned int pipe)
 {
        return 0;
 }
 
-static int qxl_noop_enable_vblank(struct drm_device *dev, int crtc)
+static int qxl_noop_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        return 0;
 }
 
-static void qxl_noop_disable_vblank(struct drm_device *dev, int crtc)
+static void qxl_noop_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
 }
 
index 41c422fee31a02dbc932964bc4686921e533fdd3..c4a552637c9353d70cab76083b7d7786dc436d29 100644 (file)
@@ -144,14 +144,17 @@ static void qxl_dirty_update(struct qxl_fbdev *qfbdev,
 
        spin_lock_irqsave(&qfbdev->dirty.lock, flags);
 
-       if (qfbdev->dirty.y1 < y)
-               y = qfbdev->dirty.y1;
-       if (qfbdev->dirty.y2 > y2)
-               y2 = qfbdev->dirty.y2;
-       if (qfbdev->dirty.x1 < x)
-               x = qfbdev->dirty.x1;
-       if (qfbdev->dirty.x2 > x2)
-               x2 = qfbdev->dirty.x2;
+       if ((qfbdev->dirty.y2 - qfbdev->dirty.y1) &&
+           (qfbdev->dirty.x2 - qfbdev->dirty.x1)) {
+               if (qfbdev->dirty.y1 < y)
+                       y = qfbdev->dirty.y1;
+               if (qfbdev->dirty.y2 > y2)
+                       y2 = qfbdev->dirty.y2;
+               if (qfbdev->dirty.x1 < x)
+                       x = qfbdev->dirty.x1;
+               if (qfbdev->dirty.x2 > x2)
+                       x2 = qfbdev->dirty.x2;
+       }
 
        qfbdev->dirty.x1 = x;
        qfbdev->dirty.x2 = x2;
index b66ec331c17cd51f1b81022ebd29d18944258b43..4efa8e261baf59546ca24eb39920bc4159358ab7 100644 (file)
@@ -307,7 +307,7 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
                idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
                if (idr_ret < 0)
                        return idr_ret;
-               bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
+               bo = to_qxl_bo(entry->tv.bo);
 
                (*release)->release_offset = create_rel->release_offset + 64;
 
@@ -316,8 +316,6 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
                info = qxl_release_map(qdev, *release);
                info->id = idr_ret;
                qxl_release_unmap(qdev, *release, info);
-
-               qxl_bo_unref(&bo);
                return 0;
        }
 
index 2c45ac9c1dc3afc7d956387c8acd337797edacc9..14fd83b5f497468a8764cedc4dcc98ae17b799b6 100644 (file)
@@ -311,7 +311,7 @@ static void r128_cce_init_ring_buffer(struct drm_device *dev,
        /* The manual (p. 2) says this address is in "VM space".  This
         * means it's an offset from the start of AGP space.
         */
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (!dev_priv->is_pci)
                ring_start = dev_priv->cce_ring->offset - dev->agp->base;
        else
@@ -505,7 +505,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
            (drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->handle +
                                  init->sarea_priv_offset);
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (!dev_priv->is_pci) {
                drm_legacy_ioremap_wc(dev_priv->cce_ring, dev);
                drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev);
@@ -529,7 +529,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
                        (void *)(unsigned long)dev->agp_buffer_map->offset;
        }
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (!dev_priv->is_pci)
                dev_priv->cce_buffers_offset = dev->agp->base;
        else
@@ -552,7 +552,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
        dev_priv->sarea_priv->last_dispatch = 0;
        R128_WRITE(R128_LAST_DISPATCH_REG, dev_priv->sarea_priv->last_dispatch);
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (dev_priv->is_pci) {
 #endif
                dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
@@ -568,7 +568,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
                        return -ENOMEM;
                }
                R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr);
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        }
 #endif
 
@@ -600,7 +600,7 @@ int r128_do_cleanup_cce(struct drm_device *dev)
        if (dev->dev_private) {
                drm_r128_private_t *dev_priv = dev->dev_private;
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
                if (!dev_priv->is_pci) {
                        if (dev_priv->cce_ring != NULL)
                                drm_legacy_ioremapfree(dev_priv->cce_ring, dev);
index 723e5d6f10a4b96ed1a7db6f5f71f6c51d6a1f3c..09143b840482c86702650f649c773b034de1426d 100644 (file)
@@ -154,9 +154,9 @@ extern int r128_wait_ring(drm_r128_private_t *dev_priv, int n);
 extern int r128_do_cce_idle(drm_r128_private_t *dev_priv);
 extern int r128_do_cleanup_cce(struct drm_device *dev);
 
-extern int r128_enable_vblank(struct drm_device *dev, int crtc);
-extern void r128_disable_vblank(struct drm_device *dev, int crtc);
-extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
+extern int r128_enable_vblank(struct drm_device *dev, unsigned int pipe);
+extern void r128_disable_vblank(struct drm_device *dev, unsigned int pipe);
+extern u32 r128_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
 extern irqreturn_t r128_driver_irq_handler(int irq, void *arg);
 extern void r128_driver_irq_preinstall(struct drm_device *dev);
 extern int r128_driver_irq_postinstall(struct drm_device *dev);
index c2ae496babb7374da8381c688f15883ebc533bc0..9730f4918944db8b3147bf2c85144bb310b0b5c6 100644 (file)
 #include <drm/r128_drm.h>
 #include "r128_drv.h"
 
-u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
+u32 r128_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 {
        const drm_r128_private_t *dev_priv = dev->dev_private;
 
-       if (crtc != 0)
+       if (pipe != 0)
                return 0;
 
        return atomic_read(&dev_priv->vbl_received);
@@ -62,12 +62,12 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
        return IRQ_NONE;
 }
 
-int r128_enable_vblank(struct drm_device *dev, int crtc)
+int r128_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        drm_r128_private_t *dev_priv = dev->dev_private;
 
-       if (crtc != 0) {
-               DRM_ERROR("%s:  bad crtc %d\n", __func__, crtc);
+       if (pipe != 0) {
+               DRM_ERROR("%s:  bad crtc %u\n", __func__, pipe);
                return -EINVAL;
        }
 
@@ -75,10 +75,10 @@ int r128_enable_vblank(struct drm_device *dev, int crtc)
        return 0;
 }
 
-void r128_disable_vblank(struct drm_device *dev, int crtc)
+void r128_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       if (crtc != 0)
-               DRM_ERROR("%s:  bad crtc %d\n", __func__, crtc);
+       if (pipe != 0)
+               DRM_ERROR("%s:  bad crtc %u\n", __func__, pipe);
 
        /*
         * FIXME: implement proper interrupt disable by using the vblank
index 9cd49c584263c895ad368586a1df58120d9c9a71..bd73b4069069b900b01e4858bc38bb42dfc3ac10 100644 (file)
@@ -179,6 +179,7 @@ radeon_dp_aux_transfer_atom(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
        switch (msg->request & ~DP_AUX_I2C_MOT) {
        case DP_AUX_NATIVE_WRITE:
        case DP_AUX_I2C_WRITE:
+       case DP_AUX_I2C_WRITE_STATUS_UPDATE:
                /* The atom implementation only supports writes with a max payload of
                 * 12 bytes since it uses 4 bits for the total count (header + payload)
                 * in the parameter space.  The atom interface supports 16 byte
index c9e0fbbf76a3cb8f29c6b41a0beea3554c832551..46f87d4aaf31fef4ae90d50321aee3f67439031f 100644 (file)
@@ -34,6 +34,8 @@
 #define MAX(a,b)                   (((a)>(b))?(a):(b))
 #define MIN(a,b)                   (((a)<(b))?(a):(b))
 
+#define REG_SAFE_BM_SIZE ARRAY_SIZE(evergreen_reg_safe_bm)
+
 int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
                           struct radeon_bo_list **cs_reloc);
 struct evergreen_cs_track {
@@ -84,6 +86,7 @@ struct evergreen_cs_track {
        u32                     htile_surface;
        struct radeon_bo        *htile_bo;
        unsigned long           indirect_draw_buffer_size;
+       const unsigned          *reg_safe_bm;
 };
 
 static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
@@ -444,7 +447,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
                 * command stream.
                 */
                if (!surf.mode) {
-                       volatile u32 *ib = p->ib.ptr;
+                       uint32_t *ib = p->ib.ptr;
                        unsigned long tmp, nby, bsize, size, min = 0;
 
                        /* find the height the ddx wants */
@@ -1083,41 +1086,18 @@ static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
 }
 
 /**
- * evergreen_cs_check_reg() - check if register is authorized or not
+ * evergreen_cs_handle_reg() - process registers that need special handling.
  * @parser: parser structure holding parsing context
  * @reg: register we are testing
  * @idx: index into the cs buffer
- *
- * This function will test against evergreen_reg_safe_bm and return 0
- * if register is safe. If register is not flag as safe this function
- * will test it against a list of register needind special handling.
  */
-static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 {
        struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
        struct radeon_bo_list *reloc;
-       u32 last_reg;
-       u32 m, i, tmp, *ib;
+       u32 tmp, *ib;
        int r;
 
-       if (p->rdev->family >= CHIP_CAYMAN)
-               last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
-       else
-               last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
-
-       i = (reg >> 7);
-       if (i >= last_reg) {
-               dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
-               return -EINVAL;
-       }
-       m = 1 << ((reg >> 2) & 31);
-       if (p->rdev->family >= CHIP_CAYMAN) {
-               if (!(cayman_reg_safe_bm[i] & m))
-                       return 0;
-       } else {
-               if (!(evergreen_reg_safe_bm[i] & m))
-                       return 0;
-       }
        ib = p->ib.ptr;
        switch (reg) {
        /* force following reg to 0 in an attempt to disable out buffer
@@ -1764,29 +1744,27 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        return 0;
 }
 
-static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+/**
+ * evergreen_is_safe_reg() - check if register is authorized or not
+ * @parser: parser structure holding parsing context
+ * @reg: register we are testing
+ *
+ * This function will test against reg_safe_bm and return true
+ * if register is safe or false otherwise.
+ */
+static inline bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg)
 {
-       u32 last_reg, m, i;
-
-       if (p->rdev->family >= CHIP_CAYMAN)
-               last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
-       else
-               last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+       struct evergreen_cs_track *track = p->track;
+       u32 m, i;
 
        i = (reg >> 7);
-       if (i >= last_reg) {
-               dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+       if (unlikely(i >= REG_SAFE_BM_SIZE)) {
                return false;
        }
        m = 1 << ((reg >> 2) & 31);
-       if (p->rdev->family >= CHIP_CAYMAN) {
-               if (!(cayman_reg_safe_bm[i] & m))
-                       return true;
-       } else {
-               if (!(evergreen_reg_safe_bm[i] & m))
-                       return true;
-       }
-       dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+       if (!(track->reg_safe_bm[i] & m))
+               return true;
+
        return false;
 }
 
@@ -1795,7 +1773,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 {
        struct radeon_bo_list *reloc;
        struct evergreen_cs_track *track;
-       volatile u32 *ib;
+       uint32_t *ib;
        unsigned idx;
        unsigned i;
        unsigned start_reg, end_reg, reg;
@@ -2321,9 +2299,10 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                        DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
                        return -EINVAL;
                }
-               for (i = 0; i < pkt->count; i++) {
-                       reg = start_reg + (4 * i);
-                       r = evergreen_cs_check_reg(p, reg, idx+1+i);
+               for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
+                       if (evergreen_is_safe_reg(p, reg))
+                               continue;
+                       r = evergreen_cs_handle_reg(p, reg, idx);
                        if (r)
                                return r;
                }
@@ -2337,9 +2316,10 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                        DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
                        return -EINVAL;
                }
-               for (i = 0; i < pkt->count; i++) {
-                       reg = start_reg + (4 * i);
-                       r = evergreen_cs_check_reg(p, reg, idx+1+i);
+               for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
+                       if (evergreen_is_safe_reg(p, reg))
+                               continue;
+                       r = evergreen_cs_handle_reg(p, reg, idx);
                        if (r)
                                return r;
                }
@@ -2594,8 +2574,11 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                } else {
                        /* SRC is a reg. */
                        reg = radeon_get_ib_value(p, idx+1) << 2;
-                       if (!evergreen_is_safe_reg(p, reg, idx+1))
+                       if (!evergreen_is_safe_reg(p, reg)) {
+                               dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
+                                        reg, idx + 1);
                                return -EINVAL;
+                       }
                }
                if (idx_value & 0x2) {
                        u64 offset;
@@ -2618,8 +2601,11 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                } else {
                        /* DST is a reg. */
                        reg = radeon_get_ib_value(p, idx+3) << 2;
-                       if (!evergreen_is_safe_reg(p, reg, idx+3))
+                       if (!evergreen_is_safe_reg(p, reg)) {
+                               dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
+                                        reg, idx + 3);
                                return -EINVAL;
+                       }
                }
                break;
        case PACKET3_NOP:
@@ -2644,11 +2630,15 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
                if (track == NULL)
                        return -ENOMEM;
                evergreen_cs_track_init(track);
-               if (p->rdev->family >= CHIP_CAYMAN)
+               if (p->rdev->family >= CHIP_CAYMAN) {
                        tmp = p->rdev->config.cayman.tile_config;
-               else
+                       track->reg_safe_bm = cayman_reg_safe_bm;
+               } else {
                        tmp = p->rdev->config.evergreen.tile_config;
-
+                       track->reg_safe_bm = evergreen_reg_safe_bm;
+               }
+               BUILD_BUG_ON(ARRAY_SIZE(cayman_reg_safe_bm) != REG_SAFE_BM_SIZE);
+               BUILD_BUG_ON(ARRAY_SIZE(evergreen_reg_safe_bm) != REG_SAFE_BM_SIZE);
                switch (tmp & 0xf) {
                case 0:
                        track->npipes = 1;
@@ -2757,7 +2747,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
        struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
        struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc;
        u32 header, cmd, count, sub_cmd;
-       volatile u32 *ib = p->ib.ptr;
+       uint32_t *ib = p->ib.ptr;
        u32 idx;
        u64 src_offset, dst_offset, dst2_offset;
        int r;
index 98f9adaccc3dadfc75c85ff5a7cbdf9994e48abc..e231eeafef23a577003269f25490104cdec29eba 100644 (file)
@@ -1837,7 +1837,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
        SET_RING_HEAD(dev_priv, 0);
        dev_priv->ring.tail = 0;
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (dev_priv->flags & RADEON_IS_AGP) {
                rptr_addr = dev_priv->ring_rptr->offset
                        - dev->agp->base +
@@ -1863,7 +1863,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
                     dev_priv->ring.size_l2qw);
 #endif
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (dev_priv->flags & RADEON_IS_AGP) {
                /* XXX */
                radeon_write_agp_base(dev_priv, dev->agp->base);
@@ -1946,7 +1946,7 @@ int r600_do_cleanup_cp(struct drm_device *dev)
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (dev_priv->flags & RADEON_IS_AGP) {
                if (dev_priv->cp_ring != NULL) {
                        drm_legacy_ioremapfree(dev_priv->cp_ring, dev);
@@ -2089,7 +2089,7 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
                }
        }
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        /* XXX */
        if (dev_priv->flags & RADEON_IS_AGP) {
                drm_legacy_ioremap_wc(dev_priv->cp_ring, dev);
@@ -2148,7 +2148,7 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
                 * location in the card and on the bus, though we have to
                 * align it down.
                 */
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
                /* XXX */
                if (dev_priv->flags & RADEON_IS_AGP) {
                        base = dev->agp->base;
@@ -2175,7 +2175,7 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
                                 base, dev_priv->gart_vm_start);
        }
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        /* XXX */
        if (dev_priv->flags & RADEON_IS_AGP)
                dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
@@ -2212,7 +2212,7 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
 
        dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (dev_priv->flags & RADEON_IS_AGP) {
                /* XXX turn off pcie gart */
        } else
index 77e9d07c55b6701cbdb0a55bcbc9b68954e5c0d7..59acd0e5c2c6384705fdda0a6b89e65be5f5abc4 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <linux/power_supply.h>
-#include <linux/vga_switcheroo.h>
 #include <acpi/video.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
index a9297b2c3524ec128e3df129b21a4cd027cf20dd..fe994aac3b0403357d40e9b29f46e8818be454bf 100644 (file)
@@ -28,7 +28,7 @@
 #include "radeon.h"
 #include <drm/radeon_drm.h>
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
 
 struct radeon_agpmode_quirk {
        u32 hostbridge_vendor;
@@ -123,7 +123,7 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
 
 int radeon_agp_init(struct radeon_device *rdev)
 {
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list;
        struct drm_agp_mode mode;
        struct drm_agp_info info;
@@ -257,7 +257,7 @@ int radeon_agp_init(struct radeon_device *rdev)
 
 void radeon_agp_resume(struct radeon_device *rdev)
 {
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        int r;
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
@@ -269,7 +269,7 @@ void radeon_agp_resume(struct radeon_device *rdev)
 
 void radeon_agp_fini(struct radeon_device *rdev)
 {
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
                drm_agp_release(rdev->ddev);
        }
index f2421bc3e901904d8fbe92deffb9e98d033de62b..1d4d4520a0ac27a6b4944a9d7abcfdb4f209ec07 100644 (file)
@@ -31,7 +31,6 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/radeon_drm.h>
 #include <linux/vgaarb.h>
-#include <linux/vga_switcheroo.h>
 #include "radeon_reg.h"
 #include "radeon.h"
 #include "radeon_asic.h"
index 8bc7d0bbd3c80a94529cea79bf4f078d7951a87a..a771b9f0bf98d60e51f1c15f3f0b69f7832b08ca 100644 (file)
@@ -535,7 +535,7 @@ static bool radeon_atpx_detect(void)
 
        if (has_atpx && vga_count == 2) {
                acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
-               printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
+               printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
                       acpi_method_name);
                radeon_atpx_priv.atpx_detected = true;
                return true;
index d27e4ccb848c9c60e8a14f71336b790b125d2231..21b6732425c50d4b368d6b0fb016ea507a8aa6a1 100644 (file)
@@ -30,7 +30,6 @@
 #include "radeon.h"
 #include "atom.h"
 
-#include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
 /*
index ea134a7d51a51f8adb92856bb98a4e442a21e7f8..500287eff55db2373e7359001b60628fd7ae41b7 100644 (file)
@@ -762,7 +762,7 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
                             ((dev_priv->gart_vm_start - 1) & 0xffff0000)
                             | (dev_priv->fb_location >> 16));
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (dev_priv->flags & RADEON_IS_AGP) {
                radeon_write_agp_base(dev_priv, dev->agp->base);
 
@@ -791,7 +791,7 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
        SET_RING_HEAD(dev_priv, cur_read_ptr);
        dev_priv->ring.tail = cur_read_ptr;
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (dev_priv->flags & RADEON_IS_AGP) {
                RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
                             dev_priv->ring_rptr->offset
@@ -1335,7 +1335,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
                }
        }
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (dev_priv->flags & RADEON_IS_AGP) {
                drm_legacy_ioremap_wc(dev_priv->cp_ring, dev);
                drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev);
@@ -1394,7 +1394,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
                 * location in the card and on the bus, though we have to
                 * align it down.
                 */
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
                if (dev_priv->flags & RADEON_IS_AGP) {
                        base = dev->agp->base;
                        /* Check if valid */
@@ -1424,7 +1424,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
                        RADEON_READ(RADEON_CONFIG_APER_SIZE);
        }
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (dev_priv->flags & RADEON_IS_AGP)
                dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
                                                 - dev->agp->base
@@ -1455,7 +1455,7 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
 
        dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (dev_priv->flags & RADEON_IS_AGP) {
                /* Turn off PCI GART */
                radeon_set_pcigart(dev_priv, 0);
@@ -1566,7 +1566,7 @@ static int radeon_do_cleanup_cp(struct drm_device * dev)
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (dev_priv->flags & RADEON_IS_AGP) {
                if (dev_priv->cp_ring != NULL) {
                        drm_legacy_ioremapfree(dev_priv->cp_ring, dev);
@@ -1625,7 +1625,7 @@ static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_pri
 
        DRM_DEBUG("Starting radeon_do_resume_cp()\n");
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (dev_priv->flags & RADEON_IS_AGP) {
                /* Turn off PCI GART */
                radeon_set_pcigart(dev_priv, 0);
index f3f562f6d848d17d3cc4e48d2701f5de5a14a46a..c566993a2ec3bcbe393ff14d079b0134f0426c7d 100644 (file)
@@ -1197,7 +1197,7 @@ static void radeon_check_arguments(struct radeon_device *rdev)
  * radeon_switcheroo_set_state - set switcheroo state
  *
  * @pdev: pci dev pointer
- * @state: vga switcheroo state
+ * @state: vga_switcheroo state
  *
  * Callback for the switcheroo driver.  Suspends or resumes the
  * the asics before or after it is powered up using ACPI methods.
index d2e9e9efc159c053b954aed21840ebe7d91f2739..a8d9927ed9eb9657d89e34c84481c9c9c9e3513f 100644 (file)
@@ -323,7 +323,8 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
         */
        if (update_pending &&
            (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0,
-                                                              &vpos, &hpos, NULL, NULL)) &&
+                                                              &vpos, &hpos, NULL, NULL,
+                                                              &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
            ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
             (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
                /* crtc didn't flip in this target vblank interval,
@@ -1633,18 +1634,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
        radeon_fbdev_init(rdev);
        drm_kms_helper_poll_init(rdev->ddev);
 
-       if (rdev->pm.dpm_enabled) {
-               /* do dpm late init */
-               ret = radeon_pm_late_init(rdev);
-               if (ret) {
-                       rdev->pm.dpm_enabled = false;
-                       DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
-               }
-               /* set the dpm state for PX since there won't be
-                * a modeset to call this.
-                */
-               radeon_pm_compute_clocks(rdev);
-       }
+       /* do pm late init */
+       ret = radeon_pm_late_init(rdev);
 
        return 0;
 }
@@ -1798,8 +1789,10 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
  * unknown small number of scanlines wrt. real scanout position.
  *
  */
-int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags,
-                              int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+                              unsigned int flags, int *vpos, int *hpos,
+                              ktime_t *stime, ktime_t *etime,
+                              const struct drm_display_mode *mode)
 {
        u32 stat_crtc = 0, vbl = 0, position = 0;
        int vbl_start, vbl_end, vtotal, ret = 0;
@@ -1814,42 +1807,42 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
                *stime = ktime_get();
 
        if (ASIC_IS_DCE4(rdev)) {
-               if (crtc == 0) {
+               if (pipe == 0) {
                        vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
                                     EVERGREEN_CRTC0_REGISTER_OFFSET);
                        position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
                                          EVERGREEN_CRTC0_REGISTER_OFFSET);
                        ret |= DRM_SCANOUTPOS_VALID;
                }
-               if (crtc == 1) {
+               if (pipe == 1) {
                        vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
                                     EVERGREEN_CRTC1_REGISTER_OFFSET);
                        position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
                                          EVERGREEN_CRTC1_REGISTER_OFFSET);
                        ret |= DRM_SCANOUTPOS_VALID;
                }
-               if (crtc == 2) {
+               if (pipe == 2) {
                        vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
                                     EVERGREEN_CRTC2_REGISTER_OFFSET);
                        position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
                                          EVERGREEN_CRTC2_REGISTER_OFFSET);
                        ret |= DRM_SCANOUTPOS_VALID;
                }
-               if (crtc == 3) {
+               if (pipe == 3) {
                        vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
                                     EVERGREEN_CRTC3_REGISTER_OFFSET);
                        position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
                                          EVERGREEN_CRTC3_REGISTER_OFFSET);
                        ret |= DRM_SCANOUTPOS_VALID;
                }
-               if (crtc == 4) {
+               if (pipe == 4) {
                        vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
                                     EVERGREEN_CRTC4_REGISTER_OFFSET);
                        position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
                                          EVERGREEN_CRTC4_REGISTER_OFFSET);
                        ret |= DRM_SCANOUTPOS_VALID;
                }
-               if (crtc == 5) {
+               if (pipe == 5) {
                        vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
                                     EVERGREEN_CRTC5_REGISTER_OFFSET);
                        position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
@@ -1857,19 +1850,19 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
                        ret |= DRM_SCANOUTPOS_VALID;
                }
        } else if (ASIC_IS_AVIVO(rdev)) {
-               if (crtc == 0) {
+               if (pipe == 0) {
                        vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
                        position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
                        ret |= DRM_SCANOUTPOS_VALID;
                }
-               if (crtc == 1) {
+               if (pipe == 1) {
                        vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
                        position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
                        ret |= DRM_SCANOUTPOS_VALID;
                }
        } else {
                /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
-               if (crtc == 0) {
+               if (pipe == 0) {
                        /* Assume vbl_end == 0, get vbl_start from
                         * upper 16 bits.
                         */
@@ -1883,7 +1876,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
 
                        ret |= DRM_SCANOUTPOS_VALID;
                }
-               if (crtc == 1) {
+               if (pipe == 1) {
                        vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
                                RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
                        position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
@@ -1914,7 +1907,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
        }
        else {
                /* No: Fake something reasonable which gives at least ok results. */
-               vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
+               vbl_start = mode->crtc_vdisplay;
                vbl_end = 0;
        }
 
@@ -1930,7 +1923,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
 
        /* Inside "upper part" of vblank area? Apply corrective offset if so: */
        if (in_vbl && (*vpos >= vbl_start)) {
-               vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
+               vtotal = mode->crtc_vtotal;
                *vpos = *vpos - vtotal;
        }
 
@@ -1952,8 +1945,8 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int fl
         * We only do this if DRM_CALLED_FROM_VBLIRQ.
         */
        if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
-               vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
-               vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
+               vbl_start = mode->crtc_vdisplay;
+               vtotal = mode->crtc_vtotal;
 
                if (vbl_start - *vpos < vtotal / 100) {
                        *vpos -= vtotal;
index 5751446677d382428846b14fc6d37d908bb582b9..5b6a6f5b3619e582afbb029279762d6bca5aa501 100644 (file)
@@ -105,10 +105,10 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
                                struct drm_file *file_priv);
 int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
 int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
-u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
-int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
-void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
-int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
+int radeon_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+void radeon_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
                                    int *max_error,
                                    struct timeval *vblank_time,
                                    unsigned flags);
@@ -124,10 +124,10 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
 struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
                                        struct drm_gem_object *gobj,
                                        int flags);
-extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
-                                     unsigned int flags,
-                                     int *vpos, int *hpos, ktime_t *stime,
-                                     ktime_t *etime);
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc,
+                                     unsigned int flags, int *vpos, int *hpos,
+                                     ktime_t *stime, ktime_t *etime,
+                                     const struct drm_display_mode *mode);
 extern bool radeon_is_px(struct drm_device *dev);
 extern const struct drm_ioctl_desc radeon_ioctls_kms[];
 extern int radeon_max_kms_ioctl;
index 46bd3938282ca84a6a19338f476daee049575ce7..0caafc7a6e17c3bb765e5b7b9404d7e267587eb5 100644 (file)
@@ -404,9 +404,9 @@ extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *
 extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
 
 extern void radeon_do_release(struct drm_device * dev);
-extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
-extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
-extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
+extern u32 radeon_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
+extern int radeon_enable_vblank(struct drm_device *dev, unsigned int pipe);
+extern void radeon_disable_vblank(struct drm_device *dev, unsigned int pipe);
 extern irqreturn_t radeon_driver_irq_handler(int irq, void *arg);
 extern void radeon_driver_irq_preinstall(struct drm_device * dev);
 extern int radeon_driver_irq_postinstall(struct drm_device *dev);
index 1aa657fe31cb26f88c2d413bb1a38974e091eee2..26da2f4d7b4f56fca3948af07bca9c061bb5ddaf 100644 (file)
@@ -397,3 +397,19 @@ void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector
 {
        drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
 }
+
+void radeon_fbdev_restore_mode(struct radeon_device *rdev)
+{
+       struct radeon_fbdev *rfbdev = rdev->mode_info.rfbdev;
+       struct drm_fb_helper *fb_helper;
+       int ret;
+
+       if (!rfbdev)
+               return;
+
+       fb_helper = &rfbdev->helper;
+
+       ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+       if (ret)
+               DRM_DEBUG("failed to restore crtc mode\n");
+}
index 244b19bab2e72406648ef1b2eae8ef7965bfcf01..688afb62f7c467f497002f3cda30e43a2cb63766 100644 (file)
@@ -62,12 +62,12 @@ static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
                RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
 }
 
-int radeon_enable_vblank(struct drm_device *dev, int crtc)
+int radeon_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
 
        if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
-               switch (crtc) {
+               switch (pipe) {
                case 0:
                        r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1);
                        break;
@@ -75,12 +75,12 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc)
                        r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1);
                        break;
                default:
-                       DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
-                                 crtc);
+                       DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
+                                 pipe);
                        return -EINVAL;
                }
        } else {
-               switch (crtc) {
+               switch (pipe) {
                case 0:
                        radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
                        break;
@@ -88,8 +88,8 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc)
                        radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
                        break;
                default:
-                       DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
-                                 crtc);
+                       DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
+                                 pipe);
                        return -EINVAL;
                }
        }
@@ -97,12 +97,12 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc)
        return 0;
 }
 
-void radeon_disable_vblank(struct drm_device *dev, int crtc)
+void radeon_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
 
        if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
-               switch (crtc) {
+               switch (pipe) {
                case 0:
                        r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0);
                        break;
@@ -110,12 +110,12 @@ void radeon_disable_vblank(struct drm_device *dev, int crtc)
                        r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0);
                        break;
                default:
-                       DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
-                                 crtc);
+                       DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
+                                 pipe);
                        break;
                }
        } else {
-               switch (crtc) {
+               switch (pipe) {
                case 0:
                        radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
                        break;
@@ -123,8 +123,8 @@ void radeon_disable_vblank(struct drm_device *dev, int crtc)
                        radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
                        break;
                default:
-                       DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
-                                 crtc);
+                       DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
+                                 pipe);
                        break;
                }
        }
@@ -255,7 +255,7 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
        return ret;
 }
 
-u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
+u32 radeon_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
 
@@ -264,18 +264,18 @@ u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
                return -EINVAL;
        }
 
-       if (crtc < 0 || crtc > 1) {
-               DRM_ERROR("Invalid crtc %d\n", crtc);
+       if (pipe > 1) {
+               DRM_ERROR("Invalid crtc %u\n", pipe);
                return -EINVAL;
        }
 
        if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
-               if (crtc == 0)
+               if (pipe == 0)
                        return RADEON_READ(R500_D1CRTC_FRAME_COUNT);
                else
                        return RADEON_READ(R500_D2CRTC_FRAME_COUNT);
        } else {
-               if (crtc == 0)
+               if (pipe == 0)
                        return RADEON_READ(RADEON_CRTC_CRNT_FRAME);
                else
                        return RADEON_READ(RADEON_CRTC2_CRNT_FRAME);
index 4a119c255ba9709692b234c51a928d826cc22ec2..6f50a37b18cbccdb5f931c86b15e82b084a83040 100644 (file)
@@ -598,14 +598,17 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  * Outdated mess for old drm with Xorg being in charge (void function now).
  */
 /**
- * radeon_driver_firstopen_kms - drm callback for last close
+ * radeon_driver_lastclose_kms - drm callback for last close
  *
  * @dev: drm dev pointer
  *
- * Switch vga switcheroo state after last close (all asics).
+ * Switch vga_switcheroo state after last close (all asics).
  */
 void radeon_driver_lastclose_kms(struct drm_device *dev)
 {
+       struct radeon_device *rdev = dev->dev_private;
+
+       radeon_fbdev_restore_mode(rdev);
        vga_switcheroo_process_delayed_switch();
 }
 
@@ -841,77 +844,37 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
                                                     vblank_time, flags,
-                                                    drmcrtc, &drmcrtc->hwmode);
-}
-
-#define KMS_INVALID_IOCTL(name)                                                \
-static int name(struct drm_device *dev, void *data, struct drm_file    \
-               *file_priv)                                             \
-{                                                                      \
-       DRM_ERROR("invalid ioctl with kms %s\n", __func__);             \
-       return -EINVAL;                                                 \
+                                                    &drmcrtc->hwmode);
 }
 
-/*
- * All these ioctls are invalid in kms world.
- */
-KMS_INVALID_IOCTL(radeon_cp_init_kms)
-KMS_INVALID_IOCTL(radeon_cp_start_kms)
-KMS_INVALID_IOCTL(radeon_cp_stop_kms)
-KMS_INVALID_IOCTL(radeon_cp_reset_kms)
-KMS_INVALID_IOCTL(radeon_cp_idle_kms)
-KMS_INVALID_IOCTL(radeon_cp_resume_kms)
-KMS_INVALID_IOCTL(radeon_engine_reset_kms)
-KMS_INVALID_IOCTL(radeon_fullscreen_kms)
-KMS_INVALID_IOCTL(radeon_cp_swap_kms)
-KMS_INVALID_IOCTL(radeon_cp_clear_kms)
-KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
-KMS_INVALID_IOCTL(radeon_cp_indices_kms)
-KMS_INVALID_IOCTL(radeon_cp_texture_kms)
-KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
-KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
-KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
-KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
-KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
-KMS_INVALID_IOCTL(radeon_cp_flip_kms)
-KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
-KMS_INVALID_IOCTL(radeon_mem_free_kms)
-KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
-KMS_INVALID_IOCTL(radeon_irq_emit_kms)
-KMS_INVALID_IOCTL(radeon_irq_wait_kms)
-KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
-KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
-KMS_INVALID_IOCTL(radeon_surface_free_kms)
-
-
 const struct drm_ioctl_desc radeon_ioctls_kms[] = {
-       DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_START, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_RESET, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SWAP, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CLEAR, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_VERTEX, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_INDICES, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_FLIP, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_ALLOC, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_FREE, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH),
        /* KMS */
        DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
index aecc3e3dec0ca093441e3871df414627b51e92ec..830e171c3a9e35c108f69acd2d4d45386033cd8b 100644 (file)
@@ -874,10 +874,10 @@ extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
                                   int x, int y);
 extern void radeon_cursor_reset(struct drm_crtc *crtc);
 
-extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
-                                     unsigned int flags,
-                                     int *vpos, int *hpos, ktime_t *stime,
-                                     ktime_t *etime);
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+                                     unsigned int flags, int *vpos, int *hpos,
+                                     ktime_t *stime, ktime_t *etime,
+                                     const struct drm_display_mode *mode);
 
 extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
 extern struct edid *
@@ -980,6 +980,7 @@ int radeon_fbdev_init(struct radeon_device *rdev);
 void radeon_fbdev_fini(struct radeon_device *rdev);
 void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
 bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
+void radeon_fbdev_restore_mode(struct radeon_device *rdev);
 
 void radeon_fb_output_poll_changed(struct radeon_device *rdev);
 
index 05751f3f84449d40457b3f989f0d7ab874935bbf..bcdc508127f104c482b6900404272057bb687009 100644 (file)
@@ -1326,14 +1326,6 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
        INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
 
        if (rdev->pm.num_power_states > 1) {
-               /* where's the best place to put these? */
-               ret = device_create_file(rdev->dev, &dev_attr_power_profile);
-               if (ret)
-                       DRM_ERROR("failed to create device file for power profile\n");
-               ret = device_create_file(rdev->dev, &dev_attr_power_method);
-               if (ret)
-                       DRM_ERROR("failed to create device file for power method\n");
-
                if (radeon_debugfs_pm_init(rdev)) {
                        DRM_ERROR("Failed to register debugfs file for PM!\n");
                }
@@ -1391,20 +1383,6 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
                goto dpm_failed;
        rdev->pm.dpm_enabled = true;
 
-       ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
-       if (ret)
-               DRM_ERROR("failed to create device file for dpm state\n");
-       ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
-       if (ret)
-               DRM_ERROR("failed to create device file for dpm state\n");
-       /* XXX: these are noops for dpm but are here for backwards compat */
-       ret = device_create_file(rdev->dev, &dev_attr_power_profile);
-       if (ret)
-               DRM_ERROR("failed to create device file for power profile\n");
-       ret = device_create_file(rdev->dev, &dev_attr_power_method);
-       if (ret)
-               DRM_ERROR("failed to create device file for power method\n");
-
        if (radeon_debugfs_pm_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for dpm!\n");
        }
@@ -1545,9 +1523,44 @@ int radeon_pm_late_init(struct radeon_device *rdev)
        int ret = 0;
 
        if (rdev->pm.pm_method == PM_METHOD_DPM) {
-               mutex_lock(&rdev->pm.mutex);
-               ret = radeon_dpm_late_enable(rdev);
-               mutex_unlock(&rdev->pm.mutex);
+               if (rdev->pm.dpm_enabled) {
+                       ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
+                       if (ret)
+                               DRM_ERROR("failed to create device file for dpm state\n");
+                       ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
+                       if (ret)
+                               DRM_ERROR("failed to create device file for dpm state\n");
+                       /* XXX: these are noops for dpm but are here for backwards compat */
+                       ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+                       if (ret)
+                               DRM_ERROR("failed to create device file for power profile\n");
+                       ret = device_create_file(rdev->dev, &dev_attr_power_method);
+                       if (ret)
+                               DRM_ERROR("failed to create device file for power method\n");
+
+                       mutex_lock(&rdev->pm.mutex);
+                       ret = radeon_dpm_late_enable(rdev);
+                       mutex_unlock(&rdev->pm.mutex);
+                       if (ret) {
+                               rdev->pm.dpm_enabled = false;
+                               DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+                       } else {
+                               /* set the dpm state for PX since there won't be
+                                * a modeset to call this.
+                                */
+                               radeon_pm_compute_clocks(rdev);
+                       }
+               }
+       } else {
+               if (rdev->pm.num_power_states > 1) {
+                       /* where's the best place to put these? */
+                       ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+                       if (ret)
+                               DRM_ERROR("failed to create device file for power profile\n");
+                       ret = device_create_file(rdev->dev, &dev_attr_power_method);
+                       if (ret)
+                               DRM_ERROR("failed to create device file for power method\n");
+               }
        }
        return ret;
 }
@@ -1733,7 +1746,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
         */
        for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
                if (rdev->pm.active_crtcs & (1 << crtc)) {
-                       vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL);
+                       vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0,
+                                                               &vpos, &hpos, NULL, NULL,
+                                                               &rdev->mode_info.crtcs[crtc]->base.hwmode);
                        if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
                            !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
                                in_vbl = false;
index 06ac59fe332ab089d21b279aba092f615710c7ca..e34307459e501f60895ca4a0abe156995706f921 100644 (file)
@@ -144,7 +144,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
                if (rdev->flags & RADEON_IS_AGP) {
                        if (!rdev->ddev->agp) {
                                DRM_ERROR("AGP is not enabled for memory type %u\n",
@@ -461,7 +461,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
                /* system memory */
                return 0;
        case TTM_PL_TT:
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
                if (rdev->flags & RADEON_IS_AGP) {
                        /* RADEON_IS_AGP is set only if AGP is active */
                        mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -680,7 +680,7 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
        struct radeon_ttm_tt *gtt;
 
        rdev = radeon_get_rdev(bdev);
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (rdev->flags & RADEON_IS_AGP) {
                return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
                                         size, page_flags, dummy_read_page);
@@ -736,7 +736,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
        }
 
        rdev = radeon_get_rdev(ttm->bdev);
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (rdev->flags & RADEON_IS_AGP) {
                return ttm_agp_tt_populate(ttm);
        }
@@ -787,7 +787,7 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
                return;
 
        rdev = radeon_get_rdev(ttm->bdev);
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
        if (rdev->flags & RADEON_IS_AGP) {
                ttm_agp_tt_unpopulate(ttm);
                return;
index e9115d3f67b0ca0a34ff68ce564b316895c81939..e72bf46042e0a42f469cbfd8ff285b1ae9abb155 100644 (file)
@@ -2928,6 +2928,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
        { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
        { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
        { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
+       { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
        { 0, 0, 0, 0 },
 };
 
index 780ca11512ba6cf138c593327186cdd7ab751777..feddda0aaea22f6d04750255cbce94838e2666a3 100644 (file)
@@ -221,20 +221,20 @@ static void rcar_du_lastclose(struct drm_device *dev)
        drm_fbdev_cma_restore_mode(rcdu->fbdev);
 }
 
-static int rcar_du_enable_vblank(struct drm_device *dev, int crtc)
+static int rcar_du_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct rcar_du_device *rcdu = dev->dev_private;
 
-       rcar_du_crtc_enable_vblank(&rcdu->crtcs[crtc], true);
+       rcar_du_crtc_enable_vblank(&rcdu->crtcs[pipe], true);
 
        return 0;
 }
 
-static void rcar_du_disable_vblank(struct drm_device *dev, int crtc)
+static void rcar_du_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct rcar_du_device *rcdu = dev->dev_private;
 
-       rcar_du_crtc_enable_vblank(&rcdu->crtcs[crtc], false);
+       rcar_du_crtc_enable_vblank(&rcdu->crtcs[pipe], false);
 }
 
 static const struct file_operations rcar_du_fops = {
@@ -259,7 +259,7 @@ static struct drm_driver rcar_du_driver = {
        .preclose               = rcar_du_preclose,
        .lastclose              = rcar_du_lastclose,
        .set_busid              = drm_platform_set_busid,
-       .get_vblank_counter     = drm_vblank_count,
+       .get_vblank_counter     = drm_vblank_no_hw_counter,
        .enable_vblank          = rcar_du_enable_vblank,
        .disable_vblank         = rcar_du_disable_vblank,
        .gem_free_object        = drm_gem_cma_free_object,
index 56518eb1269a5cce724f733747bd64bc1fea03e2..ca12e8ca5552b58610bb7cff68c93b60968153db 100644 (file)
@@ -456,7 +456,7 @@ static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
        /* Apply the atomic update. */
        drm_atomic_helper_commit_modeset_disables(dev, old_state);
        drm_atomic_helper_commit_modeset_enables(dev, old_state);
-       drm_atomic_helper_commit_planes(dev, old_state);
+       drm_atomic_helper_commit_planes(dev, old_state, false);
 
        drm_atomic_helper_wait_for_vblanks(dev, old_state);
 
index 9a0c2911272a9062a65f10096928e73edd35dd4f..f22e1e1ee64aae52a1e3efb657a78790bd6ba573 100644 (file)
@@ -103,7 +103,8 @@ static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm,
        return NULL;
 }
 
-static int rockchip_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
+static int rockchip_drm_crtc_enable_vblank(struct drm_device *dev,
+                                          unsigned int pipe)
 {
        struct rockchip_drm_private *priv = dev->dev_private;
        struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe);
@@ -115,7 +116,8 @@ static int rockchip_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
        return 0;
 }
 
-static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
+static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev,
+                                            unsigned int pipe)
 {
        struct rockchip_drm_private *priv = dev->dev_private;
        struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe);
@@ -277,7 +279,7 @@ static struct drm_driver rockchip_drm_driver = {
        .load                   = rockchip_drm_load,
        .unload                 = rockchip_drm_unload,
        .lastclose              = rockchip_drm_lastclose,
-       .get_vblank_counter     = drm_vblank_count,
+       .get_vblank_counter     = drm_vblank_no_hw_counter,
        .enable_vblank          = rockchip_drm_crtc_enable_vblank,
        .disable_vblank         = rockchip_drm_crtc_disable_vblank,
        .gem_vm_ops             = &rockchip_drm_vm_ops,
index 666321de7b9928d9dc88af09cfd95efd0a5dbadb..04e66e3751b49859c93bc66d1c85f518f564038e 100644 (file)
@@ -231,7 +231,7 @@ static irqreturn_t shmob_drm_irq(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-static int shmob_drm_enable_vblank(struct drm_device *dev, int crtc)
+static int shmob_drm_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct shmob_drm_device *sdev = dev->dev_private;
 
@@ -240,7 +240,7 @@ static int shmob_drm_enable_vblank(struct drm_device *dev, int crtc)
        return 0;
 }
 
-static void shmob_drm_disable_vblank(struct drm_device *dev, int crtc)
+static void shmob_drm_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct shmob_drm_device *sdev = dev->dev_private;
 
@@ -269,7 +269,7 @@ static struct drm_driver shmob_drm_driver = {
        .preclose               = shmob_drm_preclose,
        .set_busid              = drm_platform_set_busid,
        .irq_handler            = shmob_drm_irq,
-       .get_vblank_counter     = drm_vblank_count,
+       .get_vblank_counter     = drm_vblank_no_hw_counter,
        .enable_vblank          = shmob_drm_enable_vblank,
        .disable_vblank         = shmob_drm_disable_vblank,
        .gem_free_object        = drm_gem_cma_free_object,
index 16f972b2a76a09c8bee6d8b1ce110b0b4572c00c..328f8a750976fda4d1435124535299a3350691cc 100644 (file)
@@ -67,6 +67,10 @@ typedef struct drm_sis_private {
        struct idr object_idr;
 } drm_sis_private_t;
 
+struct sis_file_private {
+       struct list_head obj_list;
+};
+
 extern int sis_idle(struct drm_device *dev);
 extern void sis_reclaim_buffers_locked(struct drm_device *dev,
                                       struct drm_file *file_priv);
index 018ffc970e96e6cd209935c2e6c5f480a4676ebb..493c4a3006adb4b62e7074aff5f68f80dc7463c3 100644 (file)
@@ -299,7 +299,7 @@ int sti_crtc_vblank_cb(struct notifier_block *nb,
        return 0;
 }
 
-int sti_crtc_enable_vblank(struct drm_device *dev, int crtc)
+int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        struct sti_private *dev_priv = dev->dev_private;
        struct sti_compositor *compo = dev_priv->compo;
@@ -307,9 +307,9 @@ int sti_crtc_enable_vblank(struct drm_device *dev, int crtc)
 
        DRM_DEBUG_DRIVER("\n");
 
-       if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ?
+       if (sti_vtg_register_client(pipe == STI_MIXER_MAIN ?
                        compo->vtg_main : compo->vtg_aux,
-                       vtg_vblank_nb, crtc)) {
+                       vtg_vblank_nb, pipe)) {
                DRM_ERROR("Cannot register VTG notifier\n");
                return -EINVAL;
        }
@@ -318,7 +318,7 @@ int sti_crtc_enable_vblank(struct drm_device *dev, int crtc)
 }
 EXPORT_SYMBOL(sti_crtc_enable_vblank);
 
-void sti_crtc_disable_vblank(struct drm_device *drm_dev, int crtc)
+void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe)
 {
        struct sti_private *priv = drm_dev->dev_private;
        struct sti_compositor *compo = priv->compo;
@@ -326,14 +326,14 @@ void sti_crtc_disable_vblank(struct drm_device *drm_dev, int crtc)
 
        DRM_DEBUG_DRIVER("\n");
 
-       if (sti_vtg_unregister_client(crtc == STI_MIXER_MAIN ?
+       if (sti_vtg_unregister_client(pipe == STI_MIXER_MAIN ?
                        compo->vtg_main : compo->vtg_aux, vtg_vblank_nb))
                DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
 
        /* free the resources of the pending requests */
-       if (compo->mixer[crtc]->pending_event) {
-               drm_vblank_put(drm_dev, crtc);
-               compo->mixer[crtc]->pending_event = NULL;
+       if (compo->mixer[pipe]->pending_event) {
+               drm_vblank_put(drm_dev, pipe);
+               compo->mixer[pipe]->pending_event = NULL;
        }
 }
 EXPORT_SYMBOL(sti_crtc_disable_vblank);
index 51963e6ddbe7df564939f56ee3730d8edbd565a9..3f2d89a3634d6dc6876a85900f91b9965479083e 100644 (file)
@@ -13,8 +13,8 @@ struct sti_mixer;
 
 int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
                  struct drm_plane *primary, struct drm_plane *cursor);
-int sti_crtc_enable_vblank(struct drm_device *dev, int crtc);
-void sti_crtc_disable_vblank(struct drm_device *dev, int crtc);
+int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe);
+void sti_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe);
 int sti_crtc_vblank_cb(struct notifier_block *nb,
                       unsigned long event, void *data);
 bool sti_crtc_is_main(struct drm_crtc *drm_crtc);
index 6f4af6a8ba1bf7883b8c04db0c863018f092f275..f8469967a0bfbb43dcf5faee539ac9b5d5b5f328 100644 (file)
@@ -59,7 +59,7 @@ static void sti_atomic_complete(struct sti_private *private,
         */
 
        drm_atomic_helper_commit_modeset_disables(drm, state);
-       drm_atomic_helper_commit_planes(drm, state);
+       drm_atomic_helper_commit_planes(drm, state, false);
        drm_atomic_helper_commit_modeset_enables(drm, state);
 
        drm_atomic_helper_wait_for_vblanks(drm, state);
@@ -201,7 +201,7 @@ static struct drm_driver sti_driver = {
        .dumb_destroy = drm_gem_dumb_destroy,
        .fops = &sti_driver_fops,
 
-       .get_vblank_counter = drm_vblank_count,
+       .get_vblank_counter = drm_vblank_no_hw_counter,
        .enable_vblank = sti_crtc_enable_vblank,
        .disable_vblank = sti_crtc_disable_vblank,
 
index ddefb85dc4f72f4ee8d1fd6aa7678d5236144eda..b4af4ab9ce6b139a059aa2a47876d8e66b4877fd 100644 (file)
@@ -480,14 +480,12 @@ static const struct drm_plane_funcs tegra_primary_plane_funcs = {
 };
 
 static int tegra_plane_prepare_fb(struct drm_plane *plane,
-                                 struct drm_framebuffer *fb,
                                  const struct drm_plane_state *new_state)
 {
        return 0;
 }
 
 static void tegra_plane_cleanup_fb(struct drm_plane *plane,
-                                  struct drm_framebuffer *fb,
                                   const struct drm_plane_state *old_fb)
 {
 }
index 224a7dc8e4ed683a40bd0456045ca6e02fbaaa18..6aecb6647313daf4bb31cf5be122a0b792dd6704 100644 (file)
@@ -119,6 +119,7 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
         */
        if (msg->size < 1) {
                switch (msg->request & ~DP_AUX_I2C_MOT) {
+               case DP_AUX_I2C_WRITE_STATUS_UPDATE:
                case DP_AUX_I2C_WRITE:
                case DP_AUX_I2C_READ:
                        value = DPAUX_DP_AUXCTL_CMD_ADDRESS_ONLY;
@@ -149,7 +150,7 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
 
                break;
 
-       case DP_AUX_I2C_STATUS:
+       case DP_AUX_I2C_WRITE_STATUS_UPDATE:
                if (msg->request & DP_AUX_I2C_MOT)
                        value |= DPAUX_DP_AUXCTL_CMD_MOT_RQ;
                else
index 6d88cf1fcd1cd3e0a6335fa766d8ca578c0815e3..759e6af91e59cbe7f79b918dd9fbcdc346634ba9 100644 (file)
@@ -56,7 +56,7 @@ static void tegra_atomic_complete(struct tegra_drm *tegra,
         */
 
        drm_atomic_helper_commit_modeset_disables(drm, state);
-       drm_atomic_helper_commit_planes(drm, state);
+       drm_atomic_helper_commit_planes(drm, state, false);
        drm_atomic_helper_commit_modeset_enables(drm, state);
 
        drm_atomic_helper_wait_for_vblanks(drm, state);
@@ -822,7 +822,8 @@ static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
        return NULL;
 }
 
-static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
+static u32 tegra_drm_get_vblank_counter(struct drm_device *drm,
+                                       unsigned int pipe)
 {
        struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
        struct tegra_dc *dc = to_tegra_dc(crtc);
@@ -833,7 +834,7 @@ static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
        return tegra_dc_get_vblank_counter(dc);
 }
 
-static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
+static int tegra_drm_enable_vblank(struct drm_device *drm, unsigned int pipe)
 {
        struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
        struct tegra_dc *dc = to_tegra_dc(crtc);
@@ -846,7 +847,7 @@ static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
        return 0;
 }
 
-static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
+static void tegra_drm_disable_vblank(struct drm_device *drm, unsigned int pipe)
 {
        struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
        struct tegra_dc *dc = to_tegra_dc(crtc);
index 0f283a3b932cf5ed5d237701cae825df8a95d06d..876cad58b1f9ee1863d8de1bccf737b75b597ec6 100644 (file)
@@ -425,13 +425,13 @@ static void enable_vblank(struct drm_device *dev, bool enable)
                tilcdc_clear(dev, reg, mask);
 }
 
-static int tilcdc_enable_vblank(struct drm_device *dev, int crtc)
+static int tilcdc_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        enable_vblank(dev, true);
        return 0;
 }
 
-static void tilcdc_disable_vblank(struct drm_device *dev, int crtc)
+static void tilcdc_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        enable_vblank(dev, false);
 }
@@ -563,7 +563,7 @@ static struct drm_driver tilcdc_driver = {
        .irq_preinstall     = tilcdc_irq_preinstall,
        .irq_postinstall    = tilcdc_irq_postinstall,
        .irq_uninstall      = tilcdc_irq_uninstall,
-       .get_vblank_counter = drm_vblank_count,
+       .get_vblank_counter = drm_vblank_no_hw_counter,
        .enable_vblank      = tilcdc_enable_vblank,
        .disable_vblank     = tilcdc_disable_vblank,
        .gem_free_object    = drm_gem_cma_free_object,
index ef8c500b4a006e4f63d848b6e5c43d81959c5439..286a785fab4f27d71d5ceea6166cd7b75986de5f 100644 (file)
@@ -102,6 +102,10 @@ typedef struct drm_via_private {
        uint32_t dma_diff;
 } drm_via_private_t;
 
+struct via_file_private {
+       struct list_head obj_list;
+};
+
 enum via_family {
   VIA_OTHER = 0,     /* Baseline */
   VIA_PRO_GROUP_A,   /* Another video engine and DMA commands */
@@ -136,9 +140,9 @@ extern int via_init_context(struct drm_device *dev, int context);
 extern int via_final_context(struct drm_device *dev, int context);
 
 extern int via_do_cleanup_map(struct drm_device *dev);
-extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
-extern int via_enable_vblank(struct drm_device *dev, int crtc);
-extern void via_disable_vblank(struct drm_device *dev, int crtc);
+extern u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
+extern int via_enable_vblank(struct drm_device *dev, unsigned int pipe);
+extern void via_disable_vblank(struct drm_device *dev, unsigned int pipe);
 
 extern irqreturn_t via_driver_irq_handler(int irq, void *arg);
 extern void via_driver_irq_preinstall(struct drm_device *dev);
index 1319433816d3cd4743c7eadbcf073d8703c6476b..ea8172c747a2655fd7987649e1eecc675c1111ee 100644 (file)
@@ -95,10 +95,11 @@ static unsigned time_diff(struct timeval *now, struct timeval *then)
                1000000 - (then->tv_usec - now->tv_usec);
 }
 
-u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
+u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 {
        drm_via_private_t *dev_priv = dev->dev_private;
-       if (crtc != 0)
+
+       if (pipe != 0)
                return 0;
 
        return atomic_read(&dev_priv->vbl_received);
@@ -170,13 +171,13 @@ static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
        }
 }
 
-int via_enable_vblank(struct drm_device *dev, int crtc)
+int via_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        drm_via_private_t *dev_priv = dev->dev_private;
        u32 status;
 
-       if (crtc != 0) {
-               DRM_ERROR("%s:  bad crtc %d\n", __func__, crtc);
+       if (pipe != 0) {
+               DRM_ERROR("%s:  bad crtc %u\n", __func__, pipe);
                return -EINVAL;
        }
 
@@ -189,7 +190,7 @@ int via_enable_vblank(struct drm_device *dev, int crtc)
        return 0;
 }
 
-void via_disable_vblank(struct drm_device *dev, int crtc)
+void via_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        drm_via_private_t *dev_priv = dev->dev_private;
        u32 status;
@@ -200,8 +201,8 @@ void via_disable_vblank(struct drm_device *dev, int crtc)
        VIA_WRITE8(0x83d4, 0x11);
        VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
 
-       if (crtc != 0)
-               DRM_ERROR("%s:  bad crtc %d\n", __func__, crtc);
+       if (pipe != 0)
+               DRM_ERROR("%s:  bad crtc %u\n", __func__, pipe);
 }
 
 static int
index 2c7a25c71af2979c784b6cb4a87ee2088d4f8fbb..8e7493d50f1a5a9e3f1fcae7e24e0beb32b095f3 100644 (file)
@@ -1061,14 +1061,6 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
        }
        mutex_unlock(&dev->master_mutex);
 
-       /*
-        * Taking the drm_global_mutex after the TTM lock might deadlock
-        */
-       if (!(flags & DRM_UNLOCKED)) {
-               DRM_ERROR("Refusing locked ioctl access.\n");
-               return ERR_PTR(-EDEADLK);
-       }
-
        /*
         * Take the TTM lock. Possibly sleep waiting for the authenticating
         * master to become master again, or for a SIGTERM if the
index f19fd39b43e178ff0ba5ed0655b77e11f219dc68..a613bd4851ba84351d3e6181502511c34c9dec9d 100644 (file)
@@ -914,9 +914,9 @@ void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
                                uint32_t pitch,
                                uint32_t height);
-u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
-int vmw_enable_vblank(struct drm_device *dev, int crtc);
-void vmw_disable_vblank(struct drm_device *dev, int crtc);
+u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
+int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
+void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
 int vmw_kms_present(struct vmw_private *dev_priv,
                    struct drm_file *file_priv,
                    struct vmw_framebuffer *vfb,
index 15a6c01cd016b28dfe9cfec9d1d5515f1166f835..03ffab2a6a9cb7d235565e9130909f25d83d4dbf 100644 (file)
@@ -1263,7 +1263,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
 /**
  * Function called by DRM code called with vbl_lock held.
  */
-u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
+u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 {
        return 0;
 }
@@ -1271,7 +1271,7 @@ u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
 /**
  * Function called by DRM code called with vbl_lock held.
  */
-int vmw_enable_vblank(struct drm_device *dev, int crtc)
+int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
        return -ENOSYS;
 }
@@ -1279,7 +1279,7 @@ int vmw_enable_vblank(struct drm_device *dev, int crtc)
 /**
  * Function called by DRM code called with vbl_lock held.
  */
-void vmw_disable_vblank(struct drm_device *dev, int crtc)
+void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
 }
 
index 9ef2e1f54ca47c2611008d872ab8dda0e1c7df5a..d3ad5347342c622fe736a255e5635e9476de8583 100644 (file)
@@ -183,12 +183,19 @@ int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
        }
 
        if (interlaced) {
-               dc_link_event(dc, DC_EVT_NL, 0, 3);
-               dc_link_event(dc, DC_EVT_EOL, 0, 2);
-               dc_link_event(dc, DC_EVT_NEW_DATA, 0, 1);
+               int addr;
+
+               if (dc->di)
+                       addr = 1;
+               else
+                       addr = 0;
+
+               dc_link_event(dc, DC_EVT_NL, addr, 3);
+               dc_link_event(dc, DC_EVT_EOL, addr, 2);
+               dc_link_event(dc, DC_EVT_NEW_DATA, addr, 1);
 
                /* Init template microcode */
-               dc_write_tmpl(dc, 0, WROD(0), 0, map, SYNC_WAVE, 0, 8, 1);
+               dc_write_tmpl(dc, addr, WROD(0), 0, map, SYNC_WAVE, 0, 6, 1);
        } else {
                if (dc->di) {
                        dc_link_event(dc, DC_EVT_NL, 2, 3);
index 2970c6bb668ca9766eb93098adf442042e7c5f8f..359268e3a166851ba94303ee418f306817ba19ad 100644 (file)
@@ -71,6 +71,10 @@ enum di_sync_wave {
        DI_SYNC_HSYNC = 3,
        DI_SYNC_VSYNC = 4,
        DI_SYNC_DE = 6,
+
+       DI_SYNC_CNT1 = 2,       /* counter >= 2 only */
+       DI_SYNC_CNT4 = 5,       /* counter >= 5 only */
+       DI_SYNC_CNT5 = 6,       /* counter >= 6 only */
 };
 
 #define SYNC_WAVE 0
@@ -211,66 +215,59 @@ static void ipu_di_sync_config_interlaced(struct ipu_di *di,
                sig->mode.hback_porch + sig->mode.hfront_porch;
        u32 v_total = sig->mode.vactive + sig->mode.vsync_len +
                sig->mode.vback_porch + sig->mode.vfront_porch;
-       u32 reg;
        struct di_sync_config cfg[] = {
                {
-                       .run_count = h_total / 2 - 1,
-                       .run_src = DI_SYNC_CLK,
+                       /* 1: internal VSYNC for each frame */
+                       .run_count = v_total * 2 - 1,
+                       .run_src = 3,                   /* == counter 7 */
                }, {
-                       .run_count = h_total - 11,
+                       /* PIN2: HSYNC waveform */
+                       .run_count = h_total - 1,
                        .run_src = DI_SYNC_CLK,
-                       .cnt_down = 4,
+                       .cnt_polarity_gen_en = 1,
+                       .cnt_polarity_trigger_src = DI_SYNC_CLK,
+                       .cnt_down = sig->mode.hsync_len * 2,
                }, {
-                       .run_count = v_total * 2 - 1,
-                       .run_src = DI_SYNC_INT_HSYNC,
-                       .offset_count = 1,
-                       .offset_src = DI_SYNC_INT_HSYNC,
-                       .cnt_down = 4,
+                       /* PIN3: VSYNC waveform */
+                       .run_count = v_total - 1,
+                       .run_src = 4,                   /* == counter 7 */
+                       .cnt_polarity_gen_en = 1,
+                       .cnt_polarity_trigger_src = 4,  /* == counter 7 */
+                       .cnt_down = sig->mode.vsync_len * 2,
+                       .cnt_clr_src = DI_SYNC_CNT1,
                }, {
-                       .run_count = v_total / 2 - 1,
+                       /* 4: Field */
+                       .run_count = v_total / 2,
                        .run_src = DI_SYNC_HSYNC,
-                       .offset_count = sig->mode.vback_porch,
-                       .offset_src = DI_SYNC_HSYNC,
+                       .offset_count = h_total / 2,
+                       .offset_src = DI_SYNC_CLK,
                        .repeat_count = 2,
-                       .cnt_clr_src = DI_SYNC_VSYNC,
-               }, {
-                       .run_src = DI_SYNC_HSYNC,
-                       .repeat_count = sig->mode.vactive / 2,
-                       .cnt_clr_src = 4,
-               }, {
-                       .run_count = v_total - 1,
-                       .run_src = DI_SYNC_HSYNC,
+                       .cnt_clr_src = DI_SYNC_CNT1,
                }, {
-                       .run_count = v_total / 2 - 1,
+                       /* 5: Active lines */
                        .run_src = DI_SYNC_HSYNC,
-                       .offset_count = 9,
+                       .offset_count = (sig->mode.vsync_len +
+                                        sig->mode.vback_porch) / 2,
                        .offset_src = DI_SYNC_HSYNC,
-                       .repeat_count = 2,
-                       .cnt_clr_src = DI_SYNC_VSYNC,
+                       .repeat_count = sig->mode.vactive / 2,
+                       .cnt_clr_src = DI_SYNC_CNT4,
                }, {
+                       /* 6: Active pixel, referenced by DC */
                        .run_src = DI_SYNC_CLK,
-                       .offset_count = sig->mode.hback_porch,
+                       .offset_count = sig->mode.hsync_len +
+                                       sig->mode.hback_porch,
                        .offset_src = DI_SYNC_CLK,
                        .repeat_count = sig->mode.hactive,
-                       .cnt_clr_src = 5,
+                       .cnt_clr_src = DI_SYNC_CNT5,
                }, {
-                       .run_count = v_total - 1,
-                       .run_src = DI_SYNC_INT_HSYNC,
-                       .offset_count = v_total / 2,
-                       .offset_src = DI_SYNC_INT_HSYNC,
-                       .cnt_clr_src = DI_SYNC_HSYNC,
-                       .cnt_down = 4,
+                       /* 7: Half line HSYNC */
+                       .run_count = h_total / 2 - 1,
+                       .run_src = DI_SYNC_CLK,
                }
        };
 
        ipu_di_sync_config(di, cfg, 0, ARRAY_SIZE(cfg));
 
-       /* set gentime select and tag sel */
-       reg = ipu_di_read(di, DI_SW_GEN1(9));
-       reg &= 0x1FFFFFFF;
-       reg |= (3 - 1) << 29 | 0x00008000;
-       ipu_di_write(di, reg, DI_SW_GEN1(9));
-
        ipu_di_write(di, v_total / 2 - 1, DI_SCR_CONF);
 }
 
@@ -543,6 +540,29 @@ int ipu_di_adjust_videomode(struct ipu_di *di, struct videomode *mode)
 }
 EXPORT_SYMBOL_GPL(ipu_di_adjust_videomode);
 
+static u32 ipu_di_gen_polarity(int pin)
+{
+       switch (pin) {
+       case 1:
+               return DI_GEN_POLARITY_1;
+       case 2:
+               return DI_GEN_POLARITY_2;
+       case 3:
+               return DI_GEN_POLARITY_3;
+       case 4:
+               return DI_GEN_POLARITY_4;
+       case 5:
+               return DI_GEN_POLARITY_5;
+       case 6:
+               return DI_GEN_POLARITY_6;
+       case 7:
+               return DI_GEN_POLARITY_7;
+       case 8:
+               return DI_GEN_POLARITY_8;
+       }
+       return 0;
+}
+
 int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
 {
        u32 reg;
@@ -582,15 +602,8 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
 
                /* set y_sel = 1 */
                di_gen |= 0x10000000;
-               di_gen |= DI_GEN_POLARITY_5;
-               di_gen |= DI_GEN_POLARITY_8;
-
-               vsync_cnt = 7;
 
-               if (sig->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH)
-                       di_gen |= DI_GEN_POLARITY_3;
-               if (sig->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH)
-                       di_gen |= DI_GEN_POLARITY_2;
+               vsync_cnt = 3;
        } else {
                ipu_di_sync_config_noninterlaced(di, sig, div);
 
@@ -602,25 +615,13 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
                         */
                        if (!(sig->hsync_pin == 2 && sig->vsync_pin == 3))
                                vsync_cnt = 6;
-
-               if (sig->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH) {
-                       if (sig->hsync_pin == 2)
-                               di_gen |= DI_GEN_POLARITY_2;
-                       else if (sig->hsync_pin == 4)
-                               di_gen |= DI_GEN_POLARITY_4;
-                       else if (sig->hsync_pin == 7)
-                               di_gen |= DI_GEN_POLARITY_7;
-               }
-               if (sig->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH) {
-                       if (sig->vsync_pin == 3)
-                               di_gen |= DI_GEN_POLARITY_3;
-                       else if (sig->vsync_pin == 6)
-                               di_gen |= DI_GEN_POLARITY_6;
-                       else if (sig->vsync_pin == 8)
-                               di_gen |= DI_GEN_POLARITY_8;
-               }
        }
 
+       if (sig->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH)
+               di_gen |= ipu_di_gen_polarity(sig->hsync_pin);
+       if (sig->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH)
+               di_gen |= ipu_di_gen_polarity(sig->vsync_pin);
+
        if (sig->clk_pol)
                di_gen |= DI_GEN_POLARITY_DISP_CLK;
 
index 21060668fd25ba59b514c6a5a915a542ef8da159..1acbe20143d422515552d3d5a9d5a36d94e6cae3 100644 (file)
 /*
+ * vga_switcheroo.c - Support for laptop with dual GPU using one set of outputs
+ *
  * Copyright (c) 2010 Red Hat Inc.
  * Author : Dave Airlie <airlied@redhat.com>
  *
+ * Copyright (c) 2015 Lukas Wunner <lukas@wunner.de>
  *
- * Licensed under GPLv2
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
  *
- * vga_switcheroo.c - Support for laptop with dual GPU using one set of outputs
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
  *
- * Switcher interface - methods require for ATPX and DCM
- * - switchto - this throws the output MUX switch
- * - discrete_set_power - sets the power state for the discrete card
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS
+ * IN THE SOFTWARE.
  *
- * GPU driver interface
- * - set_gpu_state - this should do the equiv of s/r for the card
- *                 - this should *not* set the discrete power state
- * - switch_check  - check if the device is in a position to switch now
  */
 
 #define pr_fmt(fmt) "vga_switcheroo: " fmt
 
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#include <linux/fs.h>
+#include <linux/console.h>
 #include <linux/debugfs.h>
 #include <linux/fb.h>
-
+#include <linux/fs.h>
+#include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/console.h>
-#include <linux/vga_switcheroo.h>
 #include <linux/pm_runtime.h>
-
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
 #include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
+
+/**
+ * DOC: Overview
+ *
+ * vga_switcheroo is the Linux subsystem for laptop hybrid graphics.
+ * These come in two flavors:
+ *
+ * * muxed: Dual GPUs with a multiplexer chip to switch outputs between GPUs.
+ * * muxless: Dual GPUs but only one of them is connected to outputs.
+ *     The other one is merely used to offload rendering, its results
+ *     are copied over PCIe into the framebuffer. On Linux this is
+ *     supported with DRI PRIME.
+ *
+ * Hybrid graphics started to appear in the late Naughties and were initially
+ * all muxed. Newer laptops moved to a muxless architecture for cost reasons.
+ * A notable exception is the MacBook Pro which continues to use a mux.
+ * Muxes come with varying capabilities: Some switch only the panel, others
+ * can also switch external displays. Some switch all display pins at once
+ * while others can switch just the DDC lines. (To allow EDID probing
+ * for the inactive GPU.) Also, muxes are often used to cut power to the
+ * discrete GPU while it is not used.
+ *
+ * DRM drivers register GPUs with vga_switcheroo, these are heretoforth called
+ * clients. The mux is called the handler. Muxless machines also register a
+ * handler to control the power state of the discrete GPU, its ->switchto
+ * callback is a no-op for obvious reasons. The discrete GPU is often equipped
+ * with an HDA controller for the HDMI/DP audio signal, this will also
+ * register as a client so that vga_switcheroo can take care of the correct
+ * suspend/resume order when changing the discrete GPU's power state. In total
+ * there can thus be up to three clients: Two vga clients (GPUs) and one audio
+ * client (on the discrete GPU). The code is mostly prepared to support
+ * machines with more than two GPUs should they become available.
+ * The GPU to which the outputs are currently switched is called the
+ * active client in vga_switcheroo parlance. The GPU not in use is the
+ * inactive client.
+ */
 
+/**
+ * struct vga_switcheroo_client - registered client
+ * @pdev: client pci device
+ * @fb_info: framebuffer to which console is remapped on switching
+ * @pwr_state: current power state
+ * @ops: client callbacks
+ * @id: client identifier, see enum vga_switcheroo_client_id.
+ *     Determining the id requires the handler, so GPUs are initially
+ *     assigned -1 and later given their true id in vga_switcheroo_enable()
+ * @active: whether the outputs are currently switched to this client
+ * @driver_power_control: whether power state is controlled by the driver's
+ *     runtime pm. If true, writing ON and OFF to the vga_switcheroo debugfs
+ *     interface is a no-op so as not to interfere with runtime pm
+ * @list: client list
+ *
+ * Registered client. A client can be either a GPU or an audio device on a GPU.
+ * For audio clients, the @fb_info, @active and @driver_power_control members
+ * are bogus.
+ */
 struct vga_switcheroo_client {
        struct pci_dev *pdev;
        struct fb_info *fb_info;
@@ -44,10 +108,28 @@ struct vga_switcheroo_client {
        struct list_head list;
 };
 
+/*
+ * protects access to struct vgasr_priv
+ */
 static DEFINE_MUTEX(vgasr_mutex);
 
+/**
+ * struct vgasr_priv - vga_switcheroo private data
+ * @active: whether vga_switcheroo is enabled.
+ *     Prerequisite is the registration of two GPUs and a handler
+ * @delayed_switch_active: whether a delayed switch is pending
+ * @delayed_client_id: client to which a delayed switch is pending
+ * @debugfs_root: directory for vga_switcheroo debugfs interface
+ * @switch_file: file for vga_switcheroo debugfs interface
+ * @registered_clients: number of registered GPUs
+ *     (counting only vga clients, not audio clients)
+ * @clients: list of registered clients
+ * @handler: registered handler
+ *
+ * vga_switcheroo private data. Currently only one vga_switcheroo instance
+ * per system is supported.
+ */
 struct vgasr_priv {
-
        bool active;
        bool delayed_switch_active;
        enum vga_switcheroo_client_id delayed_client_id;
@@ -103,6 +185,15 @@ static void vga_switcheroo_enable(void)
        vgasr_priv.active = true;
 }
 
+/**
+ * vga_switcheroo_register_handler() - register handler
+ * @handler: handler callbacks
+ *
+ * Register handler. Enable vga_switcheroo if two vga clients have already
+ * registered.
+ *
+ * Return: 0 on success, -EINVAL if a handler was already registered.
+ */
 int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
 {
        mutex_lock(&vgasr_mutex);
@@ -121,6 +212,11 @@ int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
 }
 EXPORT_SYMBOL(vga_switcheroo_register_handler);
 
+/**
+ * vga_switcheroo_unregister_handler() - unregister handler
+ *
+ * Unregister handler. Disable vga_switcheroo.
+ */
 void vga_switcheroo_unregister_handler(void)
 {
        mutex_lock(&vgasr_mutex);
@@ -164,6 +260,19 @@ static int register_client(struct pci_dev *pdev,
        return 0;
 }
 
+/**
+ * vga_switcheroo_register_client - register vga client
+ * @pdev: client pci device
+ * @ops: client callbacks
+ * @driver_power_control: whether power state is controlled by the driver's
+ *     runtime pm
+ *
+ * Register vga client (GPU). Enable vga_switcheroo if another GPU and a
+ * handler have already registered. The power state of the client is assumed
+ * to be ON.
+ *
+ * Return: 0 on success, -ENOMEM on memory allocation error.
+ */
 int vga_switcheroo_register_client(struct pci_dev *pdev,
                                   const struct vga_switcheroo_client_ops *ops,
                                   bool driver_power_control)
@@ -174,11 +283,22 @@ int vga_switcheroo_register_client(struct pci_dev *pdev,
 }
 EXPORT_SYMBOL(vga_switcheroo_register_client);
 
+/**
+ * vga_switcheroo_register_audio_client - register audio client
+ * @pdev: client pci device
+ * @ops: client callbacks
+ * @id: client identifier, see enum vga_switcheroo_client_id
+ *
+ * Register audio client (audio device on a GPU). The power state of the
+ * client is assumed to be ON.
+ *
+ * Return: 0 on success, -ENOMEM on memory allocation error.
+ */
 int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
                                         const struct vga_switcheroo_client_ops *ops,
-                                        int id, bool active)
+                                        int id)
 {
-       return register_client(pdev, ops, id | ID_BIT_AUDIO, active, false);
+       return register_client(pdev, ops, id | ID_BIT_AUDIO, false, false);
 }
 EXPORT_SYMBOL(vga_switcheroo_register_audio_client);
 
@@ -210,24 +330,44 @@ find_active_client(struct list_head *head)
        struct vga_switcheroo_client *client;
 
        list_for_each_entry(client, head, list)
-               if (client->active && client_is_vga(client))
+               if (client->active)
                        return client;
        return NULL;
 }
 
+/**
+ * vga_switcheroo_get_client_state() - obtain power state of a given client
+ * @pdev: client pci device
+ *
+ * Obtain power state of a given client as seen from vga_switcheroo.
+ * The function is only called from hda_intel.c.
+ *
+ * Return: Power state.
+ */
 int vga_switcheroo_get_client_state(struct pci_dev *pdev)
 {
        struct vga_switcheroo_client *client;
+       enum vga_switcheroo_state ret;
 
+       mutex_lock(&vgasr_mutex);
        client = find_client_from_pci(&vgasr_priv.clients, pdev);
        if (!client)
-               return VGA_SWITCHEROO_NOT_FOUND;
-       if (!vgasr_priv.active)
-               return VGA_SWITCHEROO_INIT;
-       return client->pwr_state;
+               ret = VGA_SWITCHEROO_NOT_FOUND;
+       else if (!vgasr_priv.active)
+               ret = VGA_SWITCHEROO_INIT;
+       else
+               ret = client->pwr_state;
+       mutex_unlock(&vgasr_mutex);
+       return ret;
 }
 EXPORT_SYMBOL(vga_switcheroo_get_client_state);
 
+/**
+ * vga_switcheroo_unregister_client() - unregister client
+ * @pdev: client pci device
+ *
+ * Unregister client. Disable vga_switcheroo if this is a vga client (GPU).
+ */
 void vga_switcheroo_unregister_client(struct pci_dev *pdev)
 {
        struct vga_switcheroo_client *client;
@@ -249,6 +389,14 @@ void vga_switcheroo_unregister_client(struct pci_dev *pdev)
 }
 EXPORT_SYMBOL(vga_switcheroo_unregister_client);
 
+/**
+ * vga_switcheroo_client_fb_set() - set framebuffer of a given client
+ * @pdev: client pci device
+ * @info: framebuffer
+ *
+ * Set framebuffer of a given client. The console will be remapped to this
+ * on switching.
+ */
 void vga_switcheroo_client_fb_set(struct pci_dev *pdev,
                                 struct fb_info *info)
 {
@@ -262,6 +410,42 @@ void vga_switcheroo_client_fb_set(struct pci_dev *pdev,
 }
 EXPORT_SYMBOL(vga_switcheroo_client_fb_set);
 
+/**
+ * DOC: Manual switching and manual power control
+ *
+ * In this mode of use, the file /sys/kernel/debug/vgaswitcheroo/switch
+ * can be read to retrieve the current vga_switcheroo state and commands
+ * can be written to it to change the state. The file appears as soon as
+ * two GPU drivers and one handler have registered with vga_switcheroo.
+ * The following commands are understood:
+ *
+ * * OFF: Power off the device not in use.
+ * * ON: Power on the device not in use.
+ * * IGD: Switch to the integrated graphics device.
+ *     Power on the integrated GPU if necessary, power off the discrete GPU.
+ *     Prerequisite is that no user space processes (e.g. Xorg, alsactl)
+ *     have opened device files of the GPUs or the audio client. If the
+ *     switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/
+ *     and /dev/snd/controlC1 to identify processes blocking the switch.
+ * * DIS: Switch to the discrete graphics device.
+ * * DIGD: Delayed switch to the integrated graphics device.
+ *     This will perform the switch once the last user space process has
+ *     closed the device files of the GPUs and the audio client.
+ * * DDIS: Delayed switch to the discrete graphics device.
+ * * MIGD: Mux-only switch to the integrated graphics device.
+ *     Does not remap console or change the power state of either gpu.
+ *     If the integrated GPU is currently off, the screen will turn black.
+ *     If it is on, the screen will show whatever happens to be in VRAM.
+ *     Either way, the user has to blindly enter the command to switch back.
+ * * MDIS: Mux-only switch to the discrete graphics device.
+ *
+ * For GPUs whose power state is controlled by the driver's runtime pm,
+ * the ON and OFF commands are a no-op (see next section).
+ *
+ * For muxless machines, the IGD/DIS, DIGD/DDIS and MIGD/MDIS commands
+ * should not be used.
+ */
+
 static int vga_switcheroo_show(struct seq_file *m, void *v)
 {
        struct vga_switcheroo_client *client;
@@ -559,6 +743,16 @@ fail:
        return -1;
 }
 
+/**
+ * vga_switcheroo_process_delayed_switch() - helper for delayed switching
+ *
+ * Process a delayed switch if one is pending. DRM drivers should call this
+ * from their ->lastclose callback.
+ *
+ * Return: 0 on success. -EINVAL if no delayed switch is pending, if the client
+ * has unregistered in the meantime or if there are other clients blocking the
+ * switch. If the actual switch fails, an error is reported and 0 is returned.
+ */
 int vga_switcheroo_process_delayed_switch(void)
 {
        struct vga_switcheroo_client *client;
@@ -589,6 +783,39 @@ err:
 }
 EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
 
+/**
+ * DOC: Driver power control
+ *
+ * In this mode of use, the discrete GPU automatically powers up and down at
+ * the discretion of the driver's runtime pm. On muxed machines, the user may
+ * still influence the muxer state by way of the debugfs interface, however
+ * the ON and OFF commands become a no-op for the discrete GPU.
+ *
+ * This mode is the default on Nvidia HybridPower/Optimus and ATI PowerXpress.
+ * Specifying nouveau.runpm=0, radeon.runpm=0 or amdgpu.runpm=0 on the kernel
+ * command line disables it.
+ *
+ * When the driver decides to power up or down, it notifies vga_switcheroo
+ * thereof so that it can (a) power the audio device on the GPU up or down,
+ * and (b) update its internal power state representation for the device.
+ * This is achieved by vga_switcheroo_set_dynamic_switch().
+ *
+ * After the GPU has been suspended, the handler needs to be called to cut
+ * power to the GPU. Likewise it needs to reinstate power before the GPU
+ * can resume. This is achieved by vga_switcheroo_init_domain_pm_ops(),
+ * which augments the GPU's suspend/resume functions by the requisite
+ * calls to the handler.
+ *
+ * When the audio device resumes, the GPU needs to be woken. This is achieved
+ * by vga_switcheroo_init_domain_pm_optimus_hdmi_audio(), which augments the
+ * audio device's resume function.
+ *
+ * On muxed machines, if the mux is initially switched to the discrete GPU,
+ * the user ends up with a black screen when the GPU powers down after boot.
+ * As a workaround, the mux is forced to the integrated GPU on runtime suspend,
+ * cf. https://bugs.freedesktop.org/show_bug.cgi?id=75917
+ */
+
 static void vga_switcheroo_power_switch(struct pci_dev *pdev,
                                        enum vga_switcheroo_state state)
 {
@@ -607,22 +834,32 @@ static void vga_switcheroo_power_switch(struct pci_dev *pdev,
        vgasr_priv.handler->power_state(client->id, state);
 }
 
-/* force a PCI device to a certain state - mainly to turn off audio clients */
-
+/**
+ * vga_switcheroo_set_dynamic_switch() - helper for driver power control
+ * @pdev: client pci device
+ * @dynamic: new power state
+ *
+ * Helper for GPUs whose power state is controlled by the driver's runtime pm.
+ * When the driver decides to power up or down, it notifies vga_switcheroo
+ * thereof using this helper so that it can (a) power the audio device on
+ * the GPU up or down, and (b) update its internal power state representation
+ * for the device.
+ */
 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev,
                                       enum vga_switcheroo_state dynamic)
 {
        struct vga_switcheroo_client *client;
 
+       mutex_lock(&vgasr_mutex);
        client = find_client_from_pci(&vgasr_priv.clients, pdev);
-       if (!client)
-               return;
-
-       if (!client->driver_power_control)
+       if (!client || !client->driver_power_control) {
+               mutex_unlock(&vgasr_mutex);
                return;
+       }
 
        client->pwr_state = dynamic;
        set_audio_state(client->id, dynamic);
+       mutex_unlock(&vgasr_mutex);
 }
 EXPORT_SYMBOL(vga_switcheroo_set_dynamic_switch);
 
@@ -635,9 +872,11 @@ static int vga_switcheroo_runtime_suspend(struct device *dev)
        ret = dev->bus->pm->runtime_suspend(dev);
        if (ret)
                return ret;
+       mutex_lock(&vgasr_mutex);
        if (vgasr_priv.handler->switchto)
                vgasr_priv.handler->switchto(VGA_SWITCHEROO_IGD);
        vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
+       mutex_unlock(&vgasr_mutex);
        return 0;
 }
 
@@ -646,7 +885,9 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
        struct pci_dev *pdev = to_pci_dev(dev);
        int ret;
 
+       mutex_lock(&vgasr_mutex);
        vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_ON);
+       mutex_unlock(&vgasr_mutex);
        ret = dev->bus->pm->runtime_resume(dev);
        if (ret)
                return ret;
@@ -654,8 +895,18 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
        return 0;
 }
 
-/* this version is for the case where the power switch is separate
-   to the device being powered down. */
+/**
+ * vga_switcheroo_init_domain_pm_ops() - helper for driver power control
+ * @dev: vga client device
+ * @domain: power domain
+ *
+ * Helper for GPUs whose power state is controlled by the driver's runtime pm.
+ * After the GPU has been suspended, the handler needs to be called to cut
+ * power to the GPU. Likewise it needs to reinstate power before the GPU
+ * can resume. To this end, this helper augments the suspend/resume functions
+ * by the requisite calls to the handler. It needs only be called on platforms
+ * where the power switch is separate to the device being powered down.
+ */
 int vga_switcheroo_init_domain_pm_ops(struct device *dev,
                                      struct dev_pm_domain *domain)
 {
@@ -682,33 +933,50 @@ EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops);
 static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
+       struct vga_switcheroo_client *client;
+       struct device *video_dev = NULL;
        int ret;
-       struct vga_switcheroo_client *client, *found = NULL;
 
        /* we need to check if we have to switch back on the video
           device so the audio device can come back */
+       mutex_lock(&vgasr_mutex);
        list_for_each_entry(client, &vgasr_priv.clients, list) {
                if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) &&
                    client_is_vga(client)) {
-                       found = client;
-                       ret = pm_runtime_get_sync(&client->pdev->dev);
-                       if (ret) {
-                               if (ret != 1)
-                                       return ret;
-                       }
+                       video_dev = &client->pdev->dev;
                        break;
                }
        }
+       mutex_unlock(&vgasr_mutex);
+
+       if (video_dev) {
+               ret = pm_runtime_get_sync(video_dev);
+               if (ret && ret != 1)
+                       return ret;
+       }
        ret = dev->bus->pm->runtime_resume(dev);
 
        /* put the reference for the gpu */
-       if (found) {
-               pm_runtime_mark_last_busy(&found->pdev->dev);
-               pm_runtime_put_autosuspend(&found->pdev->dev);
+       if (video_dev) {
+               pm_runtime_mark_last_busy(video_dev);
+               pm_runtime_put_autosuspend(video_dev);
        }
        return ret;
 }
 
+/**
+ * vga_switcheroo_init_domain_pm_optimus_hdmi_audio() - helper for driver
+ *     power control
+ * @dev: audio client device
+ * @domain: power domain
+ *
+ * Helper for GPUs whose power state is controlled by the driver's runtime pm.
+ * When the audio device resumes, the GPU needs to be woken. This helper
+ * augments the audio device's resume function to do that.
+ *
+ * Return: 0 on success, -EINVAL if no power management operations are
+ * defined for this device.
+ */
 int
 vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev,
                                                 struct dev_pm_domain *domain)
index a0b4334561078bbd0a2d10a1a2816a372c26f33b..3166e4bc4eb6daea9a9fcad4eb5fce4b62b1b418 100644 (file)
@@ -531,7 +531,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
                return false;
 
        /* Allocate structure */
-       vgadev = kmalloc(sizeof(struct vga_device), GFP_KERNEL);
+       vgadev = kzalloc(sizeof(struct vga_device), GFP_KERNEL);
        if (vgadev == NULL) {
                pr_err("failed to allocate pci device\n");
                /*
@@ -541,8 +541,6 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
                return false;
        }
 
-       memset(vgadev, 0, sizeof(*vgadev));
-
        /* Take lock & check for duplicates */
        spin_lock_irqsave(&vga_lock, flags);
        if (vgadev_find(pdev) != NULL) {
index 8b5ce7c5d9bbfc33250ea8d15621cfd1d97be3f4..3dc56d3413b760e2580da8ccf7a6778309e72f43 100644 (file)
@@ -412,7 +412,7 @@ struct drm_driver {
        /**
         * get_vblank_counter - get raw hardware vblank counter
         * @dev: DRM device
-        * @crtc: counter to fetch
+        * @pipe: counter to fetch
         *
         * Driver callback for fetching a raw hardware vblank counter for @crtc.
         * If a device doesn't have a hardware counter, the driver can simply
@@ -426,12 +426,12 @@ struct drm_driver {
         * RETURNS
         * Raw vblank counter value.
         */
-       u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
+       u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe);
 
        /**
         * enable_vblank - enable vblank interrupt events
         * @dev: DRM device
-        * @crtc: which irq to enable
+        * @pipe: which irq to enable
         *
         * Enable vblank interrupts for @crtc.  If the device doesn't have
         * a hardware vblank counter, this routine should be a no-op, since
@@ -441,18 +441,18 @@ struct drm_driver {
         * Zero on success, appropriate errno if the given @crtc's vblank
         * interrupt cannot be enabled.
         */
-       int (*enable_vblank) (struct drm_device *dev, int crtc);
+       int (*enable_vblank) (struct drm_device *dev, unsigned int pipe);
 
        /**
         * disable_vblank - disable vblank interrupt events
         * @dev: DRM device
-        * @crtc: which irq to enable
+        * @pipe: which irq to enable
         *
         * Disable vblank interrupts for @crtc.  If the device doesn't have
         * a hardware vblank counter, this routine should be a no-op, since
         * interrupts will have to stay on to keep the count accurate.
         */
-       void (*disable_vblank) (struct drm_device *dev, int crtc);
+       void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
 
        /**
         * Called by \c drm_device_is_agp.  Typically used to determine if a
@@ -474,7 +474,7 @@ struct drm_driver {
         * optional accurate ktime_get timestamp of when position was measured.
         *
         * \param dev  DRM device.
-        * \param crtc Id of the crtc to query.
+        * \param pipe Id of the crtc to query.
         * \param flags Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0).
         * \param *vpos Target location for current vertical scanout position.
         * \param *hpos Target location for current horizontal scanout position.
@@ -482,6 +482,7 @@ struct drm_driver {
         *               scanout position query. Can be NULL to skip timestamp.
         * \param *etime Target location for timestamp taken immediately after
         *               scanout position query. Can be NULL to skip timestamp.
+        * \param mode Current display timings.
         *
         * Returns vpos as a positive number while in active scanout area.
         * Returns vpos as a negative number inside vblank, counting the number
@@ -497,10 +498,10 @@ struct drm_driver {
         * but unknown small number of scanlines wrt. real scanout position.
         *
         */
-       int (*get_scanout_position) (struct drm_device *dev, int crtc,
-                                    unsigned int flags,
-                                    int *vpos, int *hpos, ktime_t *stime,
-                                    ktime_t *etime);
+       int (*get_scanout_position) (struct drm_device *dev, unsigned int pipe,
+                                    unsigned int flags, int *vpos, int *hpos,
+                                    ktime_t *stime, ktime_t *etime,
+                                    const struct drm_display_mode *mode);
 
        /**
         * Called by \c drm_get_last_vbltimestamp. Should return a precise
@@ -516,7 +517,7 @@ struct drm_driver {
         * to the OpenML OML_sync_control extension specification.
         *
         * \param dev dev DRM device handle.
-        * \param crtc crtc for which timestamp should be returned.
+        * \param pipe crtc for which timestamp should be returned.
         * \param *max_error Maximum allowable timestamp error in nanoseconds.
         *                   Implementation should strive to provide timestamp
         *                   with an error of at most *max_error nanoseconds.
@@ -532,7 +533,7 @@ struct drm_driver {
         * negative number on failure. A positive status code on success,
         * which describes how the vblank_time timestamp was computed.
         */
-       int (*get_vblank_timestamp) (struct drm_device *dev, int crtc,
+       int (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe,
                                     int *max_error,
                                     struct timeval *vblank_time,
                                     unsigned flags);
@@ -701,6 +702,8 @@ struct drm_vblank_crtc {
        u32 last_wait;                  /* Last vblank seqno waited per CRTC */
        unsigned int inmodeset;         /* Display driver is setting mode */
        unsigned int pipe;              /* crtc index */
+       int framedur_ns;                /* frame/field duration in ns */
+       int linedur_ns;                 /* line duration in ns */
        bool enabled;                   /* so we don't call enable more than
                                           once per disable */
 };
@@ -906,6 +909,8 @@ extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
 /* Misc. IOCTL support (drm_ioctl.c) */
 int drm_noop(struct drm_device *dev, void *data,
             struct drm_file *file_priv);
+int drm_invalid_op(struct drm_device *dev, void *data,
+                  struct drm_file *file_priv);
 
 /* Cache management (drm_cache.c) */
 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
@@ -924,10 +929,12 @@ extern int drm_irq_uninstall(struct drm_device *dev);
 extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
 extern int drm_wait_vblank(struct drm_device *dev, void *data,
                           struct drm_file *filp);
-extern u32 drm_vblank_count(struct drm_device *dev, int pipe);
+extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
 extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
 extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
                                     struct timeval *vblanktime);
+extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
+                                         struct timeval *vblanktime);
 extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
                                  struct drm_pending_vblank_event *e);
 extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
@@ -946,12 +953,12 @@ extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
 extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
 extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
 extern void drm_vblank_cleanup(struct drm_device *dev);
+extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
 
 extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
                                                 unsigned int pipe, int *max_error,
                                                 struct timeval *vblank_time,
                                                 unsigned flags,
-                                                const struct drm_crtc *refcrtc,
                                                 const struct drm_display_mode *mode);
 extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
                                            const struct drm_display_mode *mode);
index 055dc058d147247c4a1652f98db20cc869a731dc..193ef19dfc5c54f061bff939150bf384be734ca6 100644 (file)
@@ -12,9 +12,6 @@
 struct drm_device;
 struct drm_file;
 
-#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && \
-                                             defined(MODULE)))
-
 struct drm_agp_head {
        struct agp_kern_info agp_info;
        struct list_head memory;
@@ -28,7 +25,7 @@ struct drm_agp_head {
        unsigned long page_mask;
 };
 
-#if __OS_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
 
 void drm_free_agp(struct agp_memory * handle, int pages);
 int drm_bind_agp(struct agp_memory * handle, unsigned int start);
@@ -66,7 +63,7 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
 int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
 
-#else /* __OS_HAS_AGP */
+#else /* CONFIG_AGP */
 
 static inline void drm_free_agp(struct agp_memory * handle, int pages)
 {
@@ -105,95 +102,47 @@ static inline int drm_agp_acquire(struct drm_device *dev)
        return -ENODEV;
 }
 
-static inline int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
-                                       struct drm_file *file_priv)
-{
-       return -ENODEV;
-}
-
 static inline int drm_agp_release(struct drm_device *dev)
 {
        return -ENODEV;
 }
 
-static inline int drm_agp_release_ioctl(struct drm_device *dev, void *data,
-                                       struct drm_file *file_priv)
-{
-       return -ENODEV;
-}
-
 static inline int drm_agp_enable(struct drm_device *dev,
                                 struct drm_agp_mode mode)
 {
        return -ENODEV;
 }
 
-static inline int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
-                                      struct drm_file *file_priv)
-{
-       return -ENODEV;
-}
-
 static inline int drm_agp_info(struct drm_device *dev,
                               struct drm_agp_info *info)
 {
        return -ENODEV;
 }
 
-static inline int drm_agp_info_ioctl(struct drm_device *dev, void *data,
-                                    struct drm_file *file_priv)
-{
-       return -ENODEV;
-}
-
 static inline int drm_agp_alloc(struct drm_device *dev,
                                struct drm_agp_buffer *request)
 {
        return -ENODEV;
 }
 
-static inline int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
-                                     struct drm_file *file_priv)
-{
-       return -ENODEV;
-}
-
 static inline int drm_agp_free(struct drm_device *dev,
                               struct drm_agp_buffer *request)
 {
        return -ENODEV;
 }
 
-static inline int drm_agp_free_ioctl(struct drm_device *dev, void *data,
-                                    struct drm_file *file_priv)
-{
-       return -ENODEV;
-}
-
 static inline int drm_agp_unbind(struct drm_device *dev,
                                 struct drm_agp_binding *request)
 {
        return -ENODEV;
 }
 
-static inline int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
-                                      struct drm_file *file_priv)
-{
-       return -ENODEV;
-}
-
 static inline int drm_agp_bind(struct drm_device *dev,
                               struct drm_agp_binding *request)
 {
        return -ENODEV;
 }
 
-static inline int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
-                                    struct drm_file *file_priv)
-{
-       return -ENODEV;
-}
-
-#endif /* __OS_HAS_AGP */
+#endif /* CONFIG_AGP */
 
 #endif /* _DRM_AGPSUPPORT_H_ */
index 11266d147a29409b12718a1b575520579d588fde..8cba54a2a0a0f78a198ba9ae82a96f6b3f6631f8 100644 (file)
@@ -30,6 +30,8 @@
 
 #include <drm/drm_crtc.h>
 
+struct drm_atomic_state;
+
 int drm_atomic_helper_check_modeset(struct drm_device *dev,
                                struct drm_atomic_state *state);
 int drm_atomic_helper_check_planes(struct drm_device *dev,
@@ -55,7 +57,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 int drm_atomic_helper_prepare_planes(struct drm_device *dev,
                                     struct drm_atomic_state *state);
 void drm_atomic_helper_commit_planes(struct drm_device *dev,
-                                    struct drm_atomic_state *state);
+                                    struct drm_atomic_state *state,
+                                    bool active_only);
 void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
                                      struct drm_atomic_state *old_state);
 void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state);
@@ -72,7 +75,11 @@ int drm_atomic_helper_update_plane(struct drm_plane *plane,
                                   uint32_t src_x, uint32_t src_y,
                                   uint32_t src_w, uint32_t src_h);
 int drm_atomic_helper_disable_plane(struct drm_plane *plane);
+int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
+               struct drm_plane_state *plane_state);
 int drm_atomic_helper_set_config(struct drm_mode_set *set);
+int __drm_atomic_helper_set_config(struct drm_mode_set *set,
+               struct drm_atomic_state *state);
 
 int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
                                        struct drm_property *property,
@@ -117,6 +124,9 @@ __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
                                           struct drm_connector_state *state);
 struct drm_connector_state *
 drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
+struct drm_atomic_state *
+drm_atomic_helper_duplicate_state(struct drm_device *dev,
+                                 struct drm_modeset_acquire_ctx *ctx);
 void
 __drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
                                            struct drm_connector_state *state);
index faaeff7db6847323a19b8112c64a9ccb3fcb5dfc..33ddedd3603871545cdcb009f9f48d08e88bb79a 100644 (file)
@@ -86,10 +86,12 @@ static inline uint64_t I642U64(int64_t val)
 }
 
 /* rotation property bits */
+#define DRM_ROTATE_MASK 0x0f
 #define DRM_ROTATE_0   0
 #define DRM_ROTATE_90  1
 #define DRM_ROTATE_180 2
 #define DRM_ROTATE_270 3
+#define DRM_REFLECT_MASK (~DRM_ROTATE_MASK)
 #define DRM_REFLECT_X  4
 #define DRM_REFLECT_Y  5
 
@@ -210,8 +212,6 @@ struct drm_framebuffer {
        int flags;
        uint32_t pixel_format; /* fourcc format */
        struct list_head filp_head;
-       /* if you are using the helper */
-       void *helper_private;
 };
 
 struct drm_property_blob {
@@ -415,9 +415,6 @@ struct drm_crtc_funcs {
  * @funcs: CRTC control functions
  * @gamma_size: size of gamma ramp
  * @gamma_store: gamma ramp values
- * @framedur_ns: precise frame timing
- * @linedur_ns: precise line timing
- * @pixeldur_ns: precise pixel timing
  * @helper_private: mid-layer private data
  * @properties: property tracking for this CRTC
  * @state: current atomic state for this CRTC
@@ -470,9 +467,6 @@ struct drm_crtc {
        uint32_t gamma_size;
        uint16_t *gamma_store;
 
-       /* Constants needed for precise vblank and swap timestamping. */
-       int framedur_ns, linedur_ns, pixeldur_ns;
-
        /* if you are using the helper */
        const void *helper_private;
 
@@ -913,7 +907,6 @@ struct drm_bridge_funcs {
  * @next: the next bridge in the encoder chain
  * @of_node: device node pointer to the bridge
  * @list: to keep track of all added bridges
- * @base: base mode object
  * @funcs: control functions
  * @driver_private: pointer to the bridge driver's internal context
  */
@@ -1390,7 +1383,7 @@ extern int drm_property_add_enum(struct drm_property *property, int index,
 extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
 extern int drm_mode_create_tv_properties(struct drm_device *dev,
                                         unsigned int num_modes,
-                                        char *modes[]);
+                                        const char * const modes[]);
 extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
 extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
 extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
index 0212d139a480909a216da244ffd8aadf6effa630..bb9d0deca07cc30d0a031cf8ffd3b522b3318d8b 100644 (file)
@@ -46,7 +46,7 @@
 
 #define DP_AUX_I2C_WRITE               0x0
 #define DP_AUX_I2C_READ                        0x1
-#define DP_AUX_I2C_STATUS              0x2
+#define DP_AUX_I2C_WRITE_STATUS_UPDATE 0x2
 #define DP_AUX_I2C_MOT                 0x4
 #define DP_AUX_NATIVE_WRITE            0x8
 #define DP_AUX_NATIVE_READ             0x9
@@ -638,6 +638,13 @@ drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
                (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
 }
 
+static inline bool
+drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+       return dpcd[DP_DPCD_REV] >= 0x12 &&
+               dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED;
+}
+
 /*
  * DisplayPort AUX channel
  */
index 53c53c459b15c8207997da61234d0ab9ea2805ae..2af97691e8781382d3b2484d3bffc4eb5a43a247 100644 (file)
@@ -326,9 +326,8 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
 int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
 int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
 int drm_av_sync_delay(struct drm_connector *connector,
-                     struct drm_display_mode *mode);
-struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
-                                    struct drm_display_mode *mode);
+                     const struct drm_display_mode *mode);
+struct drm_connector *drm_select_eld(struct drm_encoder *encoder);
 int drm_load_edid_firmware(struct drm_connector *connector);
 
 int
index dbab4622b58f7195b089d8fcadd27f31aec36e02..87b090c4b7306556dfe2096b2040890736632fab 100644 (file)
@@ -104,6 +104,20 @@ struct drm_fb_helper_connector {
        struct drm_connector *connector;
 };
 
+/**
+ * struct drm_fb_helper - helper to emulate fbdev on top of kms
+ * @fb:  Scanout framebuffer object
+ * @dev:  DRM device
+ * @crtc_count: number of possible CRTCs
+ * @crtc_info: per-CRTC helper state (mode, x/y offset, etc)
+ * @connector_count: number of connected connectors
+ * @connector_info_alloc_count: size of connector_info
+ * @funcs: driver callbacks for fb helper
+ * @fbdev: emulated fbdev device info struct
+ * @pseudo_palette: fake palette of 16 colors
+ * @kernel_fb_list: list_head in kernel_fb_helper_list
+ * @delayed_hotplug: was there a hotplug while kms master active?
+ */
 struct drm_fb_helper {
        struct drm_framebuffer *fb;
        struct drm_device *dev;
@@ -120,6 +134,17 @@ struct drm_fb_helper {
        /* we got a hotplug but fbdev wasn't running the console
           delay until next set_par */
        bool delayed_hotplug;
+
+       /**
+        * @atomic:
+        *
+        * Use atomic updates for restore_fbdev_mode(), etc.  This defaults to
+        * true if driver has DRIVER_ATOMIC feature flag, but drivers can
+        * override it to true after drm_fb_helper_init() if they support atomic
+        * modeset but do not yet advertise DRIVER_ATOMIC (note that fb-helper
+        * does not require ASYNC commits).
+        */
+       bool atomic;
 };
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -136,7 +161,7 @@ int drm_fb_helper_set_par(struct fb_info *info);
 int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
                            struct fb_info *info);
 
-bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
+int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
 
 struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper);
 void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper);
@@ -226,10 +251,10 @@ static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
        return 0;
 }
 
-static inline bool
+static inline int
 drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
 {
-       return true;
+       return 0;
 }
 
 static inline struct fb_info *
index 5dd18bfdf6017daed071135d5cbbc20111f16b34..94938d89347cf2f8860f617f28ab68d5134bacce 100644 (file)
@@ -43,19 +43,19 @@ struct drm_modeset_acquire_ctx {
 
        struct ww_acquire_ctx ww_ctx;
 
-       /**
+       /*
         * Contended lock: if a lock is contended you should only call
         * drm_modeset_backoff() which drops locks and slow-locks the
         * contended lock.
         */
        struct drm_modeset_lock *contended;
 
-       /**
+       /*
         * list of held locks (drm_modeset_lock)
         */
        struct list_head locked;
 
-       /**
+       /*
         * Trylock mode, use only for panic handlers!
         */
        bool trylock_only;
@@ -70,12 +70,12 @@ struct drm_modeset_acquire_ctx {
  * Used for locking CRTCs and other modeset resources.
  */
 struct drm_modeset_lock {
-       /**
+       /*
         * modeset lock
         */
        struct ww_mutex mutex;
 
-       /**
+       /*
         * Resources that are locked as part of an atomic update are added
         * to a list (so we know what to unlock at the end).
         */
index dda401bf910e1eace1dbecbb7db92ca342ace306..5a7f9d4efb1d5456a66081cf338a9e25b44d68f9 100644 (file)
@@ -58,10 +58,8 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
  */
 struct drm_plane_helper_funcs {
        int (*prepare_fb)(struct drm_plane *plane,
-                         struct drm_framebuffer *fb,
                          const struct drm_plane_state *new_state);
        void (*cleanup_fb)(struct drm_plane *plane,
-                          struct drm_framebuffer *fb,
                           const struct drm_plane_state *old_state);
 
        int (*atomic_check)(struct drm_plane *plane,
index bc9afa74ee11cb3f7c2876280aad4da50ad6df7b..be40dbaed11e06a93eedd67ccff968d7e1aa39fa 100644 (file)
@@ -156,7 +156,7 @@ struct fb_cursor_user {
 #define FB_EVENT_GET_REQ                0x0D
 /*      Unbind from the console if possible */
 #define FB_EVENT_FB_UNBIND              0x0E
-/*      CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */
+/*      CONSOLE-SPECIFIC: remap all consoles to new fb - for vga_switcheroo */
 #define FB_EVENT_REMAP_ALL_CONSOLE      0x0F
 /*      A hardware display blank early change occured */
 #define FB_EARLY_EVENT_BLANK           0x10
index b483abd344934f9cb9827d112883bfdf8e861d2b..37649919771754cb2a953a3793a600aa9e16612b 100644 (file)
@@ -1,10 +1,31 @@
 /*
+ * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs
+ *
  * Copyright (c) 2010 Red Hat Inc.
  * Author : Dave Airlie <airlied@redhat.com>
  *
- * Licensed under GPLv2
+ * Copyright (c) 2015 Lukas Wunner <lukas@wunner.de>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS
+ * IN THE SOFTWARE.
  *
- * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs
  */
 
 #ifndef _LINUX_VGA_SWITCHEROO_H_
 
 struct pci_dev;
 
+/**
+ * enum vga_switcheroo_state - client power state
+ * @VGA_SWITCHEROO_OFF: off
+ * @VGA_SWITCHEROO_ON: on
+ * @VGA_SWITCHEROO_INIT: client has registered with vga_switcheroo but
+ *     vga_switcheroo is not enabled, i.e. no second client or no handler
+ *     has registered. Only used in vga_switcheroo_get_client_state() which
+ *     in turn is only called from hda_intel.c
+ * @VGA_SWITCHEROO_NOT_FOUND: client has not registered with vga_switcheroo.
+ *     Only used in vga_switcheroo_get_client_state() which in turn is only
+ *     called from hda_intel.c
+ *
+ * Client power state.
+ */
 enum vga_switcheroo_state {
        VGA_SWITCHEROO_OFF,
        VGA_SWITCHEROO_ON,
@@ -22,20 +57,64 @@ enum vga_switcheroo_state {
        VGA_SWITCHEROO_NOT_FOUND,
 };
 
+/**
+ * enum vga_switcheroo_client_id - client identifier
+ * @VGA_SWITCHEROO_IGD: integrated graphics device
+ * @VGA_SWITCHEROO_DIS: discrete graphics device
+ * @VGA_SWITCHEROO_MAX_CLIENTS: currently no more than two GPUs are supported
+ *
+ * Client identifier. Audio clients use the same identifier & 0x100.
+ */
 enum vga_switcheroo_client_id {
        VGA_SWITCHEROO_IGD,
        VGA_SWITCHEROO_DIS,
        VGA_SWITCHEROO_MAX_CLIENTS,
 };
 
+/**
+ * struct vga_switcheroo_handler - handler callbacks
+ * @init: initialize handler.
+ *     Optional. This gets called when vga_switcheroo is enabled, i.e. when
+ *     two vga clients have registered. It allows the handler to perform
+ *     some delayed initialization that depends on the existence of the
+ *     vga clients. Currently only the radeon and amdgpu drivers use this.
+ *     The return value is ignored
+ * @switchto: switch outputs to given client.
+ *     Mandatory. For muxless machines this should be a no-op. Returning 0
+ *     denotes success, anything else failure (in which case the switch is
+ *     aborted)
+ * @power_state: cut or reinstate power of given client.
+ *     Optional. The return value is ignored
+ * @get_client_id: determine if given pci device is integrated or discrete GPU.
+ *     Mandatory
+ *
+ * Handler callbacks. The multiplexer itself. The @switchto and @get_client_id
+ * methods are mandatory, all others may be set to NULL.
+ */
 struct vga_switcheroo_handler {
+       int (*init)(void);
        int (*switchto)(enum vga_switcheroo_client_id id);
        int (*power_state)(enum vga_switcheroo_client_id id,
                           enum vga_switcheroo_state state);
-       int (*init)(void);
        int (*get_client_id)(struct pci_dev *pdev);
 };
 
+/**
+ * struct vga_switcheroo_client_ops - client callbacks
+ * @set_gpu_state: do the equivalent of suspend/resume for the card.
+ *     Mandatory. This should not cut power to the discrete GPU,
+ *     which is the job of the handler
+ * @reprobe: poll outputs.
+ *     Optional. This gets called after waking the GPU and switching
+ *     the outputs to it
+ * @can_switch: check if the device is in a position to switch now.
+ *     Mandatory. The client should return false if a user space process
+ *     has one of its device files open
+ *
+ * Client callbacks. A client can be either a GPU or an audio device on a GPU.
+ * The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be
+ * set to NULL. For audio clients, the @reprobe member is bogus.
+ */
 struct vga_switcheroo_client_ops {
        void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state);
        void (*reprobe)(struct pci_dev *dev);
@@ -49,7 +128,7 @@ int vga_switcheroo_register_client(struct pci_dev *dev,
                                   bool driver_power_control);
 int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
                                         const struct vga_switcheroo_client_ops *ops,
-                                        int id, bool active);
+                                        int id);
 
 void vga_switcheroo_client_fb_set(struct pci_dev *dev,
                                  struct fb_info *info);
@@ -75,7 +154,7 @@ static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_i
 static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; }
 static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
        const struct vga_switcheroo_client_ops *ops,
-       int id, bool active) { return 0; }
+       int id) { return 0; }
 static inline void vga_switcheroo_unregister_handler(void) {}
 static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
 static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
index 359107ab629efa1cc1aed023897e1873cd1ac98b..6c11ca401de8b4f4400478f500352ddd48380644 100644 (file)
 
 struct drm_mode_modeinfo {
        __u32 clock;
-       __u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
-       __u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
+       __u16 hdisplay;
+       __u16 hsync_start;
+       __u16 hsync_end;
+       __u16 htotal;
+       __u16 hskew;
+       __u16 vdisplay;
+       __u16 vsync_start;
+       __u16 vsync_end;
+       __u16 vtotal;
+       __u16 vscan;
 
        __u32 vrefresh;
 
@@ -124,8 +132,10 @@ struct drm_mode_card_res {
        __u32 count_crtcs;
        __u32 count_connectors;
        __u32 count_encoders;
-       __u32 min_width, max_width;
-       __u32 min_height, max_height;
+       __u32 min_width;
+       __u32 max_width;
+       __u32 min_height;
+       __u32 max_height;
 };
 
 struct drm_mode_crtc {
@@ -135,7 +145,8 @@ struct drm_mode_crtc {
        __u32 crtc_id; /**< Id */
        __u32 fb_id; /**< Id of framebuffer */
 
-       __u32 x, y; /**< Position on the frameuffer */
+       __u32 x; /**< x Position on the framebuffer */
+       __u32 y; /**< y Position on the framebuffer */
 
        __u32 gamma_size;
        __u32 mode_valid;
@@ -153,12 +164,16 @@ struct drm_mode_set_plane {
        __u32 flags; /* see above flags */
 
        /* Signed dest location allows it to be partially off screen */
-       __s32 crtc_x, crtc_y;
-       __u32 crtc_w, crtc_h;
+       __s32 crtc_x;
+       __s32 crtc_y;
+       __u32 crtc_w;
+       __u32 crtc_h;
 
        /* Source values are 16.16 fixed point */
-       __u32 src_x, src_y;
-       __u32 src_h, src_w;
+       __u32 src_x;
+       __u32 src_y;
+       __u32 src_h;
+       __u32 src_w;
 };
 
 struct drm_mode_get_plane {
@@ -244,7 +259,8 @@ struct drm_mode_get_connector {
        __u32 connector_type_id;
 
        __u32 connection;
-       __u32 mm_width, mm_height; /**< HxW in millimeters */
+       __u32 mm_width;  /**< width in millimeters */
+       __u32 mm_height; /**< height in millimeters */
        __u32 subpixel;
 
        __u32 pad;
@@ -327,7 +343,8 @@ struct drm_mode_get_blob {
 
 struct drm_mode_fb_cmd {
        __u32 fb_id;
-       __u32 width, height;
+       __u32 width;
+       __u32 height;
        __u32 pitch;
        __u32 bpp;
        __u32 depth;
@@ -340,7 +357,8 @@ struct drm_mode_fb_cmd {
 
 struct drm_mode_fb_cmd2 {
        __u32 fb_id;
-       __u32 width, height;
+       __u32 width;
+       __u32 height;
        __u32 pixel_format; /* fourcc code from drm_fourcc.h */
        __u32 flags; /* see above flags */
 
index df3763222d7345dc10b64492326acb9d33dc0e7d..374858cdcdaa3e216216339baa6efb43b72c14ec 100644 (file)
@@ -64,8 +64,4 @@ typedef struct {
        unsigned long offset, size;
 } drm_sis_fb_t;
 
-struct sis_file_private {
-       struct list_head obj_list;
-};
-
 #endif                         /* __SIS_DRM_H__ */
index 8b0533ccbd5ae6aab5443f37d606b67c9777a571..45bc80c3714b54c0658b227898a2916a4832b599 100644 (file)
@@ -274,8 +274,4 @@ typedef struct drm_via_dmablit {
        drm_via_blitsync_t sync;
 } drm_via_dmablit_t;
 
-struct via_file_private {
-       struct list_head obj_list;
-};
-
 #endif                         /* _VIA_DRM_H_ */
index c38c68f579381d657786945baa3ab99b3ccae787..e819013959d97d82606da618fa26354f119c682f 100644 (file)
@@ -1143,8 +1143,7 @@ static int register_vga_switcheroo(struct azx *chip)
         * is there any machine with two switchable HDMI audio controllers?
         */
        err = vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops,
-                                                   VGA_SWITCHEROO_DIS,
-                                                   hda->probe_continued);
+                                                  VGA_SWITCHEROO_DIS);
        if (err < 0)
                return err;
        hda->vga_switcheroo_registered = 1;