git.karo-electronics.de Git - linux-beck.git/commitdiff
Merge tag 'topic/drm-misc-2015-10-19' of git://anongit.freedesktop.org/drm-intel...
authorDave Airlie <airlied@redhat.com>
Mon, 19 Oct 2015 23:01:49 +0000 (09:01 +1000)
committerDave Airlie <airlied@redhat.com>
Mon, 19 Oct 2015 23:01:49 +0000 (09:01 +1000)
More drm-misc for 4.4.
- fb refcount fix in atomic fbdev
- various locking reworks to reduce drm_global_mutex and dev->struct_mutex
- rename docbook to gpu.tmpl and include vga_switcheroo stuff, plus more
  vga_switcheroo (Lukas Wunner)
- viewport check fixes for atomic drivers from Ville
- DRM_DEBUG_VBL from Ville
- non-contentious header fixes from Mikko Rapeli
- small things all over

* tag 'topic/drm-misc-2015-10-19' of git://anongit.freedesktop.org/drm-intel: (31 commits)
  drm/fb-helper: Fix fb refcounting in pan_display_atomic
  drm/fb-helper: Set plane rotation directly
  drm: fix mutex leak in drm_dp_get_mst_branch_device
  drm: Check plane src coordinates correctly during page flip for atomic drivers
  drm: Check crtc viewport correctly with rotated primary plane on atomic drivers
  drm: Refactor plane src coordinate checks
  drm: Swap w/h when converting the mode to src coordidates for a rotated primary plane
  drm: Don't leak fb when plane crtc coodinates are bad
  ALSA: hda - Spell vga_switcheroo consistently
  drm/gem: Use kref_get_unless_zero for the weak mmap references
  drm/vgem: Drop vgem_drm_gem_mmap
  drm: Fix return value of drm_framebuffer_init()
  drm/gem: Use container_of in drm_gem_object_free
  drm/gem: Check locking in drm_gem_object_unreference
  drm/gem: Drop struct_mutex requirement from drm_gem_mmap_obj
  drm/i810_drm.h: include drm/drm.h
  r128_drm.h: include drm/drm.h
  savage_drm.h: include <drm/drm.h>
  gpu/doc: Convert to markdown harder
  gpu/doc: Add vga_switcheroo documentation
  ...

1  2 
Documentation/DocBook/gpu.tmpl
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/armada/armada_drv.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/radeon/radeon_kms.c

index ac76a8b0baaacc9baf67cc80fbc7cf1313b88736,7f90c1e6569050b81870b703153701eaf4d3fee1..201dcd3c2e9d81fb10ae01abc07f5649e64ff146
@@@ -2,9 -2,9 +2,9 @@@
  <!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
        "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
  
- <book id="drmDevelopersGuide">
+ <book id="gpuDevelopersGuide">
    <bookinfo>
-     <title>Linux DRM Developer's Guide</title>
+     <title>Linux GPU Driver Developer's Guide</title>
  
      <authorgroup>
        <author>
          </address>
        </affiliation>
        </author>
+       <author>
+       <firstname>Lukas</firstname>
+       <surname>Wunner</surname>
+       <contrib>vga_switcheroo documentation</contrib>
+       <affiliation>
+         <address>
+           <email>lukas@wunner.de</email>
+         </address>
+       </affiliation>
+       </author>
      </authorgroup>
  
      <copyright>
        <year>2012</year>
        <holder>Laurent Pinchart</holder>
      </copyright>
+     <copyright>
+       <year>2015</year>
+       <holder>Lukas Wunner</holder>
+     </copyright>
  
      <legalnotice>
        <para>
        <revremark>Added extensive documentation about driver internals.
        </revremark>
        </revision>
+       <revision>
+       <revnumber>1.1</revnumber>
+       <date>2015-10-11</date>
+       <authorinitials>LW</authorinitials>
+       <revremark>Added vga_switcheroo documentation.
+       </revremark>
+       </revision>
      </revhistory>
    </bookinfo>
  
@@@ -78,9 -99,9 +99,9 @@@
    <title>DRM Core</title>
    <partintro>
      <para>
-       This first part of the DRM Developer's Guide documents core DRM code,
-       helper libraries for writing drivers and generic userspace interfaces
-       exposed by DRM drivers.
+       This first part of the GPU Driver Developer's Guide documents core DRM
+       code, helper libraries for writing drivers and generic userspace
+       interfaces exposed by DRM drivers.
      </para>
    </partintro>
  
@@@ -3583,10 -3604,11 +3604,11 @@@ void (*postclose) (struct drm_device *
        plane properties to default value, so that a subsequent open of the
        device will not inherit state from the previous user. It can also be
        used to execute delayed power switching state changes, e.g. in
-       conjunction with the vga_switcheroo infrastructure. Beyond that KMS
-       drivers should not do any further cleanup. Only legacy UMS drivers might
-       need to clean up device state so that the vga console or an independent
-       fbdev driver could take over.
+       conjunction with the vga_switcheroo infrastructure (see
+       <xref linkend="vga_switcheroo"/>). Beyond that KMS drivers should not
+       do any further cleanup. Only legacy UMS drivers might need to clean up
+       device state so that the vga console or an independent fbdev driver
+       could take over.
        </para>
      </sect2>
      <sect2>
@@@ -3684,7 -3706,9 +3706,9 @@@ int num_ioctls;</synopsis
            </para></listitem>
              <listitem><para>
              DRM_UNLOCKED - The ioctl handler will be called without locking
-             the DRM global mutex
+             the DRM global mutex. This is the enforced default for kms drivers
+             (i.e. using the DRIVER_MODESET flag) and hence shouldn't be used
+             any more for new drivers.
            </para></listitem>
          </itemizedlist>
        </para>
  
    <partintro>
      <para>
-       This second part of the DRM Developer's Guide documents driver code,
-       implementation details and also all the driver-specific userspace
+       This second part of the GPU Driver Developer's Guide documents driver
+       code, implementation details and also all the driver-specific userspace
        interfaces. Especially since all hardware-acceleration interfaces to
        userspace are driver specific for efficiency and other reasons these
        interfaces can be rather substantial. Hence every driver has its own
        <title>High Definition Audio</title>
  !Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port
  !Idrivers/gpu/drm/i915/intel_audio.c
 +!Iinclude/drm/i915_component.h
        </sect2>
        <sect2>
        <title>Panel Self Refresh PSR (PSR/SRD)</title>
  !Idrivers/gpu/drm/i915/i915_gem_shrinker.c
        </sect2>
      </sect1>
 +    <sect1>
 +      <title>GuC-based Command Submission</title>
 +      <sect2>
 +        <title>GuC</title>
 +!Pdrivers/gpu/drm/i915/intel_guc_loader.c GuC-specific firmware loader
 +!Idrivers/gpu/drm/i915/intel_guc_loader.c
 +      </sect2>
 +      <sect2>
 +        <title>GuC Client</title>
 +!Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submissison
 +!Idrivers/gpu/drm/i915/i915_guc_submission.c
 +      </sect2>
 +    </sect1>
 +
      <sect1>
        <title> Tracing </title>
        <para>
    </chapter>
  !Cdrivers/gpu/drm/i915/i915_irq.c
  </part>
+ <part id="vga_switcheroo">
+   <title>vga_switcheroo</title>
+   <partintro>
+ !Pdrivers/gpu/vga/vga_switcheroo.c Overview
+   </partintro>
+   <chapter id="modes_of_use">
+     <title>Modes of Use</title>
+   <sect1>
+     <title>Manual switching and manual power control</title>
+ !Pdrivers/gpu/vga/vga_switcheroo.c Manual switching and manual power control
+   </sect1>
+   <sect1>
+     <title>Driver power control</title>
+ !Pdrivers/gpu/vga/vga_switcheroo.c Driver power control
+   </sect1>
+   </chapter>
+   <chapter id="pubfunctions">
+     <title>Public functions</title>
+ !Edrivers/gpu/vga/vga_switcheroo.c
+   </chapter>
+   <chapter id="pubstructures">
+     <title>Public structures</title>
+ !Finclude/linux/vga_switcheroo.h vga_switcheroo_handler
+ !Finclude/linux/vga_switcheroo.h vga_switcheroo_client_ops
+   </chapter>
+   <chapter id="pubconstants">
+     <title>Public constants</title>
+ !Finclude/linux/vga_switcheroo.h vga_switcheroo_client_id
+ !Finclude/linux/vga_switcheroo.h vga_switcheroo_state
+   </chapter>
+   <chapter id="privstructures">
+     <title>Private structures</title>
+ !Fdrivers/gpu/vga/vga_switcheroo.c vgasr_priv
+ !Fdrivers/gpu/vga/vga_switcheroo.c vga_switcheroo_client
+   </chapter>
+ !Cdrivers/gpu/vga/vga_switcheroo.c
+ !Cinclude/linux/vga_switcheroo.h
+ </part>
  </book>
index dd85a0ae05c33bd0311536efa9c25b605d024238,371f015c1873d6c67cd127bdeddcbe0adf72ac3d..1618e2294a16056171458998ed65e62a83583774
@@@ -218,8 -218,8 +218,8 @@@ static int amdgpu_info_ioctl(struct drm
                        break;
                case AMDGPU_HW_IP_DMA:
                        type = AMD_IP_BLOCK_TYPE_SDMA;
 -                      ring_mask = adev->sdma[0].ring.ready ? 1 : 0;
 -                      ring_mask |= ((adev->sdma[1].ring.ready ? 1 : 0) << 1);
 +                      for (i = 0; i < adev->sdma.num_instances; i++)
 +                              ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
                        ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
                        ib_size_alignment = 1;
                        break;
                        fw_info.feature = 0;
                        break;
                case AMDGPU_INFO_FW_SDMA:
 -                      if (info->query_fw.index >= 2)
 +                      if (info->query_fw.index >= adev->sdma.num_instances)
                                return -EINVAL;
 -                      fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
 -                      fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
 +                      fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version;
 +                      fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version;
                        break;
                default:
                        return -EINVAL;
   * Outdated mess for old drm with Xorg being in charge (void function now).
   */
  /**
 - * amdgpu_driver_firstopen_kms - drm callback for last close
 + * amdgpu_driver_lastclose_kms - drm callback for last close
   *
   * @dev: drm dev pointer
   *
 - * Switch vga switcheroo state after last close (all asics).
 + * Switch vga_switcheroo state after last close (all asics).
   */
  void amdgpu_driver_lastclose_kms(struct drm_device *dev)
  {
 +      struct amdgpu_device *adev = dev->dev_private;
 +
 +      amdgpu_fbdev_restore_mode(adev);
        vga_switcheroo_process_delayed_switch();
  }
  
@@@ -689,18 -686,18 +689,18 @@@ int amdgpu_get_vblank_timestamp_kms(str
  }
  
  const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
-       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        /* KMS */
-       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
  };
  int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
index 63d909e5f63b11e0f6d8d6603898696980fe5983,5646b54948c77abe1c8b3d56835f81914df3744a..1cbb080f0c4ace970c8a37723b677948b944e2c1
  #include <drm/armada_drm.h>
  #include "armada_ioctlP.h"
  
 -#ifdef CONFIG_DRM_ARMADA_TDA1998X
 -#include <drm/i2c/tda998x.h>
 -#include "armada_slave.h"
 -
 -static struct tda998x_encoder_params params = {
 -      /* With 0x24, there is no translation between vp_out and int_vp
 -      FB      LCD out Pins    VIP     Int Vp
 -      R:23:16 R:7:0   VPC7:0  7:0     7:0[R]
 -      G:15:8  G:15:8  VPB7:0  23:16   23:16[G]
 -      B:7:0   B:23:16 VPA7:0  15:8    15:8[B]
 -      */
 -      .swap_a = 2,
 -      .swap_b = 3,
 -      .swap_c = 4,
 -      .swap_d = 5,
 -      .swap_e = 0,
 -      .swap_f = 1,
 -      .audio_cfg = BIT(2),
 -      .audio_frame[1] = 1,
 -      .audio_format = AFMT_SPDIF,
 -      .audio_sample_rate = 44100,
 -};
 -
 -static const struct armada_drm_slave_config tda19988_config = {
 -      .i2c_adapter_id = 0,
 -      .crtcs = 1 << 0, /* Only LCD0 at the moment */
 -      .polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT,
 -      .interlace_allowed = true,
 -      .info = {
 -              .type = "tda998x",
 -              .addr = 0x70,
 -              .platform_data = &params,
 -      },
 -};
 -#endif
 -
 -static bool is_componentized(struct device *dev)
 -{
 -      return dev->of_node || dev->platform_data;
 -}
 -
  static void armada_drm_unref_work(struct work_struct *work)
  {
        struct armada_private *priv =
@@@ -50,11 -91,16 +50,11 @@@ void armada_drm_queue_unref_work(struc
  
  static int armada_drm_load(struct drm_device *dev, unsigned long flags)
  {
 -      const struct platform_device_id *id;
 -      const struct armada_variant *variant;
        struct armada_private *priv;
 -      struct resource *res[ARRAY_SIZE(priv->dcrtc)];
        struct resource *mem = NULL;
 -      int ret, n, i;
 -
 -      memset(res, 0, sizeof(res));
 +      int ret, n;
  
 -      for (n = i = 0; ; n++) {
 +      for (n = 0; ; n++) {
                struct resource *r = platform_get_resource(dev->platformdev,
                                                           IORESOURCE_MEM, n);
                if (!r)
                /* Resources above 64K are graphics memory */
                if (resource_size(r) > SZ_64K)
                        mem = r;
 -              else if (i < ARRAY_SIZE(priv->dcrtc))
 -                      res[i++] = r;
                else
                        return -EINVAL;
        }
        platform_set_drvdata(dev->platformdev, dev);
        dev->dev_private = priv;
  
 -      /* Get the implementation specific driver data. */
 -      id = platform_get_device_id(dev->platformdev);
 -      if (!id)
 -              return -ENXIO;
 -
 -      variant = (const struct armada_variant *)id->driver_data;
 -
        INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
        INIT_KFIFO(priv->fb_unref);
  
        dev->mode_config.funcs = &armada_drm_mode_config_funcs;
        drm_mm_init(&priv->linear, mem->start, resource_size(mem));
  
 -      /* Create all LCD controllers */
 -      for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
 -              int irq;
 -
 -              if (!res[n])
 -                      break;
 -
 -              irq = platform_get_irq(dev->platformdev, n);
 -              if (irq < 0)
 -                      goto err_kms;
 -
 -              ret = armada_drm_crtc_create(dev, dev->dev, res[n], irq,
 -                                           variant, NULL);
 -              if (ret)
 -                      goto err_kms;
 -      }
 -
 -      if (is_componentized(dev->dev)) {
 -              ret = component_bind_all(dev->dev, dev);
 -              if (ret)
 -                      goto err_kms;
 -      } else {
 -#ifdef CONFIG_DRM_ARMADA_TDA1998X
 -              ret = armada_drm_connector_slave_create(dev, &tda19988_config);
 -              if (ret)
 -                      goto err_kms;
 -#endif
 -      }
 +      ret = component_bind_all(dev->dev, dev);
 +      if (ret)
 +              goto err_kms;
  
        ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
        if (ret)
        return 0;
  
   err_comp:
 -      if (is_componentized(dev->dev))
 -              component_unbind_all(dev->dev, dev);
 +      component_unbind_all(dev->dev, dev);
   err_kms:
        drm_mode_config_cleanup(dev);
        drm_mm_takedown(&priv->linear);
@@@ -138,7 -219,8 +138,7 @@@ static int armada_drm_unload(struct drm
        drm_kms_helper_poll_fini(dev);
        armada_fbdev_fini(dev);
  
 -      if (is_componentized(dev->dev))
 -              component_unbind_all(dev->dev, dev);
 +      component_unbind_all(dev->dev, dev);
  
        drm_mode_config_cleanup(dev);
        drm_mm_takedown(&priv->linear);
        return 0;
  }
  
 -void armada_drm_vbl_event_add(struct armada_crtc *dcrtc,
 -      struct armada_vbl_event *evt)
 -{
 -      unsigned long flags;
 -
 -      spin_lock_irqsave(&dcrtc->irq_lock, flags);
 -      if (list_empty(&evt->node)) {
 -              list_add_tail(&evt->node, &dcrtc->vbl_list);
 -
 -              drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
 -      }
 -      spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
 -}
 -
 -void armada_drm_vbl_event_remove(struct armada_crtc *dcrtc,
 -      struct armada_vbl_event *evt)
 -{
 -      if (!list_empty(&evt->node)) {
 -              list_del_init(&evt->node);
 -              drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
 -      }
 -}
 -
  /* These are called under the vbl_lock. */
  static int armada_drm_enable_vblank(struct drm_device *dev, unsigned int pipe)
  {
@@@ -163,12 -268,9 +163,9 @@@ static void armada_drm_disable_vblank(s
  }
  
  static struct drm_ioctl_desc armada_ioctls[] = {
-       DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
-               DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl,
-               DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl,
-               DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,0),
+       DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl, 0),
+       DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl, 0),
  };
  
  static void armada_drm_lastclose(struct drm_device *dev)
@@@ -330,28 -432,37 +327,28 @@@ static const struct component_master_op
  
  static int armada_drm_probe(struct platform_device *pdev)
  {
 -      if (is_componentized(&pdev->dev)) {
 -              struct component_match *match = NULL;
 -              int ret;
 -
 -              ret = armada_drm_find_components(&pdev->dev, &match);
 -              if (ret < 0)
 -                      return ret;
 -
 -              return component_master_add_with_match(&pdev->dev,
 -                              &armada_master_ops, match);
 -      } else {
 -              return drm_platform_init(&armada_drm_driver, pdev);
 -      }
 +      struct component_match *match = NULL;
 +      int ret;
 +
 +      ret = armada_drm_find_components(&pdev->dev, &match);
 +      if (ret < 0)
 +              return ret;
 +
 +      return component_master_add_with_match(&pdev->dev, &armada_master_ops,
 +                                             match);
  }
  
  static int armada_drm_remove(struct platform_device *pdev)
  {
 -      if (is_componentized(&pdev->dev))
 -              component_master_del(&pdev->dev, &armada_master_ops);
 -      else
 -              drm_put_dev(platform_get_drvdata(pdev));
 +      component_master_del(&pdev->dev, &armada_master_ops);
        return 0;
  }
  
  static const struct platform_device_id armada_drm_platform_ids[] = {
        {
                .name           = "armada-drm",
 -              .driver_data    = (unsigned long)&armada510_ops,
        }, {
                .name           = "armada-510-drm",
 -              .driver_data    = (unsigned long)&armada510_ops,
        },
        { },
  };
index bf27a07dbce36993e7ed668ff567f5565a6e0995,e4c9b4a68c046a2353960e202848112368863a5e..2d46fc6230c27ff8e89003f0856f719c4f39693f
@@@ -53,8 -53,8 +53,8 @@@ static int drm_dp_send_dpcd_write(struc
                                  struct drm_dp_mst_port *port,
                                  int offset, int size, u8 *bytes);
  
 -static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 -                                  struct drm_dp_mst_branch *mstb);
 +static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 +                                   struct drm_dp_mst_branch *mstb);
  static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
                                           struct drm_dp_mst_branch *mstb,
                                           struct drm_dp_mst_port *port);
@@@ -804,6 -804,8 +804,6 @@@ static void drm_dp_destroy_mst_branch_d
        struct drm_dp_mst_port *port, *tmp;
        bool wake_tx = false;
  
 -      cancel_work_sync(&mstb->mgr->work);
 -
        /*
         * destroy all ports - don't need lock
         * as there are no more references to the mst branch
@@@ -861,33 -863,29 +861,33 @@@ static void drm_dp_destroy_port(struct 
  {
        struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
        struct drm_dp_mst_topology_mgr *mgr = port->mgr;
 +
        if (!port->input) {
                port->vcpi.num_slots = 0;
  
                kfree(port->cached_edid);
  
 -              /* we can't destroy the connector here, as
 -                 we might be holding the mode_config.mutex
 -                 from an EDID retrieval */
 +              /*
 +               * The only time we don't have a connector
 +               * on an output port is if the connector init
 +               * fails.
 +               */
                if (port->connector) {
 +                      /* we can't destroy the connector here, as
 +                       * we might be holding the mode_config.mutex
 +                       * from an EDID retrieval */
 +
                        mutex_lock(&mgr->destroy_connector_lock);
                        list_add(&port->next, &mgr->destroy_connector_list);
                        mutex_unlock(&mgr->destroy_connector_lock);
                        schedule_work(&mgr->destroy_connector_work);
                        return;
                }
 +              /* no need to clean up vcpi
 +               * as if we have no connector we never setup a vcpi */
                drm_dp_port_teardown_pdt(port, port->pdt);
 -
 -              if (!port->input && port->vcpi.vcpi > 0)
 -                      drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
        }
        kfree(port);
 -
 -      (*mgr->cbs->hotplug)(mgr);
  }
  
  static void drm_dp_put_port(struct drm_dp_mst_port *port)
@@@ -1029,8 -1027,8 +1029,8 @@@ static void drm_dp_check_port_guid(stru
        }
  }
  
 -static void build_mst_prop_path(struct drm_dp_mst_port *port,
 -                              struct drm_dp_mst_branch *mstb,
 +static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
 +                              int pnum,
                                char *proppath,
                                size_t proppath_size)
  {
                snprintf(temp, sizeof(temp), "-%d", port_num);
                strlcat(proppath, temp, proppath_size);
        }
 -      snprintf(temp, sizeof(temp), "-%d", port->port_num);
 +      snprintf(temp, sizeof(temp), "-%d", pnum);
        strlcat(proppath, temp, proppath_size);
  }
  
@@@ -1107,32 -1105,22 +1107,32 @@@ static void drm_dp_add_port(struct drm_
                drm_dp_port_teardown_pdt(port, old_pdt);
  
                ret = drm_dp_port_setup_pdt(port);
 -              if (ret == true) {
 +              if (ret == true)
                        drm_dp_send_link_address(mstb->mgr, port->mstb);
 -                      port->mstb->link_address_sent = true;
 -              }
        }
  
        if (created && !port->input) {
                char proppath[255];
 -              build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
 -              port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
  
 -              if (port->port_num >= 8) {
 +              build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
 +              port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
 +              if (!port->connector) {
 +                      /* remove it from the port list */
 +                      mutex_lock(&mstb->mgr->lock);
 +                      list_del(&port->next);
 +                      mutex_unlock(&mstb->mgr->lock);
 +                      /* drop port list reference */
 +                      drm_dp_put_port(port);
 +                      goto out;
 +              }
 +              if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
                        port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
 +                      drm_mode_connector_set_tile_property(port->connector);
                }
 +              (*mstb->mgr->cbs->register_connector)(port->connector);
        }
  
 +out:
        /* put reference to this port */
        drm_dp_put_port(port);
  }
@@@ -1194,17 -1182,18 +1194,18 @@@ static struct drm_dp_mst_branch *drm_dp
  
                list_for_each_entry(port, &mstb->ports, next) {
                        if (port->port_num == port_num) {
-                               if (!port->mstb) {
+                               mstb = port->mstb;
+                               if (!mstb) {
                                        DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
-                                       return NULL;
+                                       goto out;
                                }
  
-                               mstb = port->mstb;
                                break;
                        }
                }
        }
        kref_get(&mstb->kref);
+ out:
        mutex_unlock(&mgr->lock);
        return mstb;
  }
@@@ -1214,9 -1203,10 +1215,9 @@@ static void drm_dp_check_and_send_link_
  {
        struct drm_dp_mst_port *port;
        struct drm_dp_mst_branch *mstb_child;
 -      if (!mstb->link_address_sent) {
 +      if (!mstb->link_address_sent)
                drm_dp_send_link_address(mgr, mstb);
 -              mstb->link_address_sent = true;
 -      }
 +
        list_for_each_entry(port, &mstb->ports, next) {
                if (port->input)
                        continue;
@@@ -1469,8 -1459,8 +1470,8 @@@ static void drm_dp_queue_down_tx(struc
        mutex_unlock(&mgr->qlock);
  }
  
 -static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 -                                  struct drm_dp_mst_branch *mstb)
 +static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 +                                   struct drm_dp_mst_branch *mstb)
  {
        int len;
        struct drm_dp_sideband_msg_tx *txmsg;
  
        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
        if (!txmsg)
 -              return -ENOMEM;
 +              return;
  
        txmsg->dst = mstb;
        len = build_link_address(txmsg);
  
 +      mstb->link_address_sent = true;
        drm_dp_queue_down_tx(mgr, txmsg);
  
        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
                        }
                        (*mgr->cbs->hotplug)(mgr);
                }
 -      } else
 +      } else {
 +              mstb->link_address_sent = false;
                DRM_DEBUG_KMS("link address failed %d\n", ret);
 +      }
  
        kfree(txmsg);
 -      return 0;
  }
  
  static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
@@@ -1991,8 -1979,6 +1992,8 @@@ void drm_dp_mst_topology_mgr_suspend(st
        drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
                           DP_MST_EN | DP_UPSTREAM_IS_SRC);
        mutex_unlock(&mgr->lock);
 +      flush_work(&mgr->work);
 +      flush_work(&mgr->destroy_connector_work);
  }
  EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
  
@@@ -2278,10 -2264,10 +2279,10 @@@ struct edid *drm_dp_mst_get_edid(struc
  
        if (port->cached_edid)
                edid = drm_edid_duplicate(port->cached_edid);
 -      else
 +      else {
                edid = drm_get_edid(connector, &port->aux.ddc);
 -
 -      drm_mode_connector_set_tile_property(connector);
 +              drm_mode_connector_set_tile_property(connector);
 +      }
        drm_dp_put_port(port);
        return edid;
  }
@@@ -2686,7 -2672,7 +2687,7 @@@ static void drm_dp_destroy_connector_wo
  {
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
        struct drm_dp_mst_port *port;
 -
 +      bool send_hotplug = false;
        /*
         * Not a regular list traverse as we have to drop the destroy
         * connector lock before destroying the connector, to avoid AB->BA
                if (!port->input && port->vcpi.vcpi > 0)
                        drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
                kfree(port);
 +              send_hotplug = true;
        }
 +      if (send_hotplug)
 +              (*mgr->cbs->hotplug)(mgr);
  }
  
  /**
@@@ -2765,7 -2748,6 +2766,7 @@@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_i
   */
  void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
  {
 +      flush_work(&mgr->work);
        flush_work(&mgr->destroy_connector_work);
        mutex_lock(&mgr->payload_lock);
        kfree(mgr->payloads);
index bd6d4ab2751250ab9af68f0aa9f91e0cb44463ce,5b2de1b3c0207fbab617f5961aa0ffc425a42c8b..e673c13c7391153d6c7f3c54b7c56a0e17d26bca
@@@ -360,11 -360,7 +360,7 @@@ retry
                        goto fail;
                }
  
-               ret = drm_atomic_plane_set_property(plane, plane_state,
-                               dev->mode_config.rotation_property,
-                               BIT(DRM_ROTATE_0));
-               if (ret != 0)
-                       goto fail;
+               plane_state->rotation = BIT(DRM_ROTATE_0);
  
                /* disable non-primary: */
                if (plane->type == DRM_PLANE_TYPE_PRIMARY)
@@@ -442,11 -438,7 +438,11 @@@ static int restore_fbdev_mode(struct dr
                struct drm_crtc *crtc = mode_set->crtc;
                int ret;
  
 -              if (crtc->funcs->cursor_set) {
 +              if (crtc->funcs->cursor_set2) {
 +                      ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
 +                      if (ret)
 +                              return ret;
 +              } else if (crtc->funcs->cursor_set) {
                        ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
                        if (ret)
                                return ret;
@@@ -1235,7 -1227,7 +1231,7 @@@ int drm_fb_helper_set_par(struct fb_inf
  EXPORT_SYMBOL(drm_fb_helper_set_par);
  
  static int pan_display_atomic(struct fb_var_screeninfo *var,
-               struct fb_info *info)
+                             struct fb_info *info)
  {
        struct drm_fb_helper *fb_helper = info->par;
        struct drm_device *dev = fb_helper->dev;
@@@ -1253,6 -1245,8 +1249,8 @@@ retry
  
                mode_set = &fb_helper->crtc_info[i].mode_set;
  
+               mode_set->crtc->primary->old_fb = mode_set->crtc->primary->fb;
                mode_set->x = var->xoffset;
                mode_set->y = var->yoffset;
  
        info->var.xoffset = var->xoffset;
        info->var.yoffset = var->yoffset;
  
-       return 0;
  
  fail:
+       for(i = 0; i < fb_helper->crtc_count; i++) {
+               struct drm_mode_set *mode_set;
+               struct drm_plane *plane;
+               mode_set = &fb_helper->crtc_info[i].mode_set;
+               plane = mode_set->crtc->primary;
+               if (ret == 0) {
+                       struct drm_framebuffer *new_fb = plane->state->fb;
+                       if (new_fb)
+                               drm_framebuffer_reference(new_fb);
+                       plane->fb = new_fb;
+                       plane->crtc = plane->state->crtc;
+                       if (plane->old_fb)
+                               drm_framebuffer_unreference(plane->old_fb);
+               }
+               plane->old_fb = NULL;
+       }
        if (ret == -EDEADLK)
                goto backoff;
  
-       drm_atomic_state_free(state);
+       if (ret != 0)
+               drm_atomic_state_free(state);
  
        return ret;
  
index d8568af2b4144c65915d17030e72e11f3a772437,3d565be3996318738babf34a99c16a7445579395..09c4c6af8cd11dfc0222f1be9fcce2a8402432cf
@@@ -304,7 -304,6 +304,7 @@@ int exynos_atomic_commit(struct drm_dev
        return 0;
  }
  
 +#ifdef CONFIG_PM_SLEEP
  static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
  {
        struct drm_connector *connector;
@@@ -341,7 -340,6 +341,7 @@@ static int exynos_drm_resume(struct drm
  
        return 0;
  }
 +#endif
  
  static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
  {
@@@ -405,25 -403,25 +405,25 @@@ static const struct vm_operations_struc
  
  static const struct drm_ioctl_desc exynos_ioctls[] = {
        DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
-                       DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+                       DRM_AUTH | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
-                       DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                       DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
-                       DRM_UNLOCKED | DRM_AUTH),
+                       DRM_AUTH),
        DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl,
-                       DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+                       DRM_AUTH | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl,
-                       DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+                       DRM_AUTH | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
-                       DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+                       DRM_AUTH | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, exynos_drm_ipp_get_property,
-                       DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+                       DRM_AUTH | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, exynos_drm_ipp_set_property,
-                       DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+                       DRM_AUTH | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, exynos_drm_ipp_queue_buf,
-                       DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+                       DRM_AUTH | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, exynos_drm_ipp_cmd_ctrl,
-                       DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+                       DRM_AUTH | DRM_RENDER_ALLOW),
  };
  
  static const struct file_operations exynos_drm_driver_fops = {
index 1e3d65743bd240075c9fd568d64e45f7bd1dd0d6,68b0c9eb9282e00154dbec5a8c997aceb6222a31..2336af92d94bb5c5d3d101227cce4b8041feffb7
@@@ -335,12 -335,12 +335,12 @@@ static void i915_switcheroo_set_state(s
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                /* i915 resume handler doesn't set to D0 */
                pci_set_power_state(dev->pdev, PCI_D0);
 -              i915_resume_legacy(dev);
 +              i915_resume_switcheroo(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_err("switched off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 -              i915_suspend_legacy(dev, pmm);
 +              i915_suspend_switcheroo(dev, pmm);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
  }
@@@ -406,11 -406,6 +406,11 @@@ static int i915_load_modeset_init(struc
         * working irqs for e.g. gmbus and dp aux transfers. */
        intel_modeset_init(dev);
  
 +      /* intel_guc_ucode_init() needs the mutex to allocate GEM objects */
 +      mutex_lock(&dev->struct_mutex);
 +      intel_guc_ucode_init(dev);
 +      mutex_unlock(&dev->struct_mutex);
 +
        ret = i915_gem_init(dev);
        if (ret)
                goto cleanup_irq;
@@@ -452,9 -447,6 +452,9 @@@ cleanup_gem
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
  cleanup_irq:
 +      mutex_lock(&dev->struct_mutex);
 +      intel_guc_ucode_fini(dev);
 +      mutex_unlock(&dev->struct_mutex);
        drm_irq_uninstall(dev);
  cleanup_gem_stolen:
        i915_gem_cleanup_stolen(dev);
@@@ -602,6 -594,17 +602,6 @@@ static void gen9_sseu_info_init(struct 
        u32 fuse2, s_enable, ss_disable, eu_disable;
        u8 eu_mask = 0xff;
  
 -      /*
 -       * BXT has a single slice. BXT also has at most 6 EU per subslice,
 -       * and therefore only the lowest 6 bits of the 8-bit EU disable
 -       * fields are valid.
 -      */
 -      if (IS_BROXTON(dev)) {
 -              s_max = 1;
 -              eu_max = 6;
 -              eu_mask = 0x3f;
 -      }
 -
        info = (struct intel_device_info *)&dev_priv->info;
        fuse2 = I915_READ(GEN8_FUSE2);
        s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
        info->has_eu_pg = (info->eu_per_subslice > 2);
  }
  
 +static void broadwell_sseu_info_init(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct intel_device_info *info;
 +      const int s_max = 3, ss_max = 3, eu_max = 8;
 +      int s, ss;
 +      u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
 +
 +      fuse2 = I915_READ(GEN8_FUSE2);
 +      s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
 +      ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
 +
 +      eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
 +      eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
 +                      ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
 +                       (32 - GEN8_EU_DIS0_S1_SHIFT));
 +      eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
 +                      ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
 +                       (32 - GEN8_EU_DIS1_S2_SHIFT));
 +
 +
 +      info = (struct intel_device_info *)&dev_priv->info;
 +      info->slice_total = hweight32(s_enable);
 +
 +      /*
 +       * The subslice disable field is global, i.e. it applies
 +       * to each of the enabled slices.
 +       */
 +      info->subslice_per_slice = ss_max - hweight32(ss_disable);
 +      info->subslice_total = info->slice_total * info->subslice_per_slice;
 +
 +      /*
 +       * Iterate through enabled slices and subslices to
 +       * count the total enabled EU.
 +       */
 +      for (s = 0; s < s_max; s++) {
 +              if (!(s_enable & (0x1 << s)))
 +                      /* skip disabled slice */
 +                      continue;
 +
 +              for (ss = 0; ss < ss_max; ss++) {
 +                      u32 n_disabled;
 +
 +                      if (ss_disable & (0x1 << ss))
 +                              /* skip disabled subslice */
 +                              continue;
 +
 +                      n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
 +
 +                      /*
 +                       * Record which subslices have 7 EUs.
 +                       */
 +                      if (eu_max - n_disabled == 7)
 +                              info->subslice_7eu[s] |= 1 << ss;
 +
 +                      info->eu_total += eu_max - n_disabled;
 +              }
 +      }
 +
 +      /*
 +       * BDW is expected to always have a uniform distribution of EU across
 +       * subslices with the exception that any one EU in any one subslice may
 +       * be fused off for die recovery.
 +       */
 +      info->eu_per_subslice = info->subslice_total ?
 +              DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
 +
 +      /*
 +       * BDW supports slice power gating on devices with more than
 +       * one slice.
 +       */
 +      info->has_slice_pg = (info->slice_total > 1);
 +      info->has_subslice_pg = 0;
 +      info->has_eu_pg = 0;
 +}
 +
  /*
   * Determine various intel_device_info fields at runtime.
   *
@@@ -819,8 -746,6 +819,8 @@@ static void intel_device_info_runtime_i
        /* Initialize slice/subslice/EU info */
        if (IS_CHERRYVIEW(dev))
                cherryview_sseu_info_init(dev);
 +      else if (IS_BROADWELL(dev))
 +              broadwell_sseu_info_init(dev);
        else if (INTEL_INFO(dev)->gen >= 9)
                gen9_sseu_info_init(dev);
  
                         info->has_eu_pg ? "y" : "n");
  }
  
 +static void intel_init_dpio(struct drm_i915_private *dev_priv)
 +{
 +      if (!IS_VALLEYVIEW(dev_priv))
 +              return;
 +
 +      /*
 +       * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
 +       * CHV x1 PHY (DP/HDMI D)
 +       * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
 +       */
 +      if (IS_CHERRYVIEW(dev_priv)) {
 +              DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
 +              DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
 +      } else {
 +              DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
 +      }
 +}
 +
  /**
   * i915_driver_load - setup chip and create an initial config
   * @dev: DRM device
@@@ -896,7 -803,6 +896,7 @@@ int i915_driver_load(struct drm_device 
        mutex_init(&dev_priv->sb_lock);
        mutex_init(&dev_priv->modeset_restore_lock);
        mutex_init(&dev_priv->csr_lock);
 +      mutex_init(&dev_priv->av_mutex);
  
        intel_pm_setup(dev);
  
        intel_setup_gmbus(dev);
        intel_opregion_setup(dev);
  
 -      intel_setup_bios(dev);
 -
        i915_gem_load(dev);
  
        /* On the 945G/GM, the chipset reports the MSI capability on the
  
        intel_device_info_runtime_init(dev);
  
 +      intel_init_dpio(dev_priv);
 +
        if (INTEL_INFO(dev)->num_pipes) {
                ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
                if (ret)
@@@ -1124,9 -1030,12 +1124,9 @@@ out_freecsr
  put_bridge:
        pci_dev_put(dev_priv->bridge_dev);
  free_priv:
 -      if (dev_priv->requests)
 -              kmem_cache_destroy(dev_priv->requests);
 -      if (dev_priv->vmas)
 -              kmem_cache_destroy(dev_priv->vmas);
 -      if (dev_priv->objects)
 -              kmem_cache_destroy(dev_priv->objects);
 +      kmem_cache_destroy(dev_priv->requests);
 +      kmem_cache_destroy(dev_priv->vmas);
 +      kmem_cache_destroy(dev_priv->objects);
        kfree(dev_priv);
        return ret;
  }
@@@ -1173,10 -1082,6 +1173,10 @@@ int i915_driver_unload(struct drm_devic
                dev_priv->vbt.child_dev = NULL;
                dev_priv->vbt.child_dev_num = 0;
        }
 +      kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
 +      dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
 +      kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
 +      dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
  
        vga_switcheroo_unregister_client(dev->pdev);
        vga_client_register(dev->pdev, NULL, NULL, NULL);
        flush_workqueue(dev_priv->wq);
  
        mutex_lock(&dev->struct_mutex);
 +      intel_guc_ucode_fini(dev);
        i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
        if (dev_priv->regs != NULL)
                pci_iounmap(dev->pdev, dev_priv->regs);
  
 -      if (dev_priv->requests)
 -              kmem_cache_destroy(dev_priv->requests);
 -      if (dev_priv->vmas)
 -              kmem_cache_destroy(dev_priv->vmas);
 -      if (dev_priv->objects)
 -              kmem_cache_destroy(dev_priv->objects);
 -
 +      kmem_cache_destroy(dev_priv->requests);
 +      kmem_cache_destroy(dev_priv->vmas);
 +      kmem_cache_destroy(dev_priv->objects);
        pci_dev_put(dev_priv->bridge_dev);
        kfree(dev_priv);
  
@@@ -1299,41 -1207,41 +1299,41 @@@ const struct drm_ioctl_desc i915_ioctls
        DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
  };
  
  int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
index 6f50a37b18cbccdb5f931c86b15e82b084a83040,4e2780f8c4175d1f0bfa5c3eea930f5e0d5b9654..5f687c95a1e131e430b2ef2ee985fad3722ea5cb
@@@ -598,17 -598,14 +598,17 @@@ static int radeon_info_ioctl(struct drm
   * Outdated mess for old drm with Xorg being in charge (void function now).
   */
  /**
 - * radeon_driver_firstopen_kms - drm callback for last close
 + * radeon_driver_lastclose_kms - drm callback for last close
   *
   * @dev: drm dev pointer
   *
 - * Switch vga switcheroo state after last close (all asics).
 + * Switch vga_switcheroo state after last close (all asics).
   */
  void radeon_driver_lastclose_kms(struct drm_device *dev)
  {
 +      struct radeon_device *rdev = dev->dev_private;
 +
 +      radeon_fbdev_restore_mode(rdev);
        vga_switcheroo_process_delayed_switch();
  }
  
@@@ -876,20 -873,20 +876,20 @@@ const struct drm_ioctl_desc radeon_ioct
        DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH),
        /* KMS */
-       DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
  };
  int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);