Merge remote-tracking branch 'pfdo/drm-rcar-for-v3.12' into drm-next
author    Dave Airlie <airlied@redhat.com>
          Sun, 18 Aug 2013 23:24:13 +0000 (09:24 +1000)
committer Dave Airlie <airlied@redhat.com>
          Sun, 18 Aug 2013 23:24:13 +0000 (09:24 +1000)
Merge the rcar stable branch that is being shared with the arm-soc tree.

Signed-off-by: Dave Airlie <airlied@redhat.com>
* pfdo/drm-rcar-for-v3.12: (220 commits)
  drm/rcar-du: Add FBDEV emulation support
  drm/rcar-du: Add internal LVDS encoder support
  drm/rcar-du: Configure RGB output routing to DPAD0
  drm/rcar-du: Rework output routing support
  drm/rcar-du: Add support for DEFR8 register
  drm/rcar-du: Add support for multiple groups
  drm/rcar-du: Fix buffer pitch alignment for R8A7790 DU
  drm/rcar-du: Add support for the R8A7790 DU
  drm/rcar-du: Move output routing configuration to group
  drm/rcar-du: Remove register definitions for the second channel
  drm/rcar-du: Use dynamic number of CRTCs instead of CRTCs array size
  drm/rcar-du: Introduce CRTCs groups
  drm/rcar-du: Rename rcar_du_plane_(init|register) to rcar_du_planes_*
  drm/rcar-du: Create rcar_du_planes structure
  drm/rcar-du: Rename platform data fields to match what they describe
  drm/rcar-du: Merge LVDS and VGA encoder code
  drm/rcar-du: Split VGA encoder and connector
  drm/rcar-du: Split LVDS encoder and connector
  drm/rcar-du: Clarify comment regarding plane Y source coordinate
  drm/rcar-du: Support per-CRTC clock and IRQ
  ...

Conflicts:
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/qxl/qxl_release.c

drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_ioctl.c
drivers/gpu/drm/qxl/qxl_object.h
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/rcar-du/rcar_du_drv.c

diff --combined drivers/gpu/drm/i915/i915_dma.c
index 05756f95be7cae521b3ad6dcac56307698217d7c,66c63808fa35e5834a876e59dd49735fdb6f82ac..f44c1a004f95e8d1c2bf9719fa0a4f131e6139b8
@@@ -1323,8 -1323,10 +1323,8 @@@ static int i915_load_modeset_init(struc
        /* Always safe in the mode setting case. */
        /* FIXME: do pre/post-mode set stuff in core KMS code */
        dev->vblank_disable_allowed = 1;
 -      if (INTEL_INFO(dev)->num_pipes == 0) {
 -              dev_priv->mm.suspended = 0;
 +      if (INTEL_INFO(dev)->num_pipes == 0)
                return 0;
 -      }
  
        ret = intel_fbdev_init(dev);
        if (ret)
  
        drm_kms_helper_poll_init(dev);
  
 -      /* We're off and running w/KMS */
 -      dev_priv->mm.suspended = 0;
 -
        return 0;
  
  cleanup_gem:
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
        i915_gem_cleanup_aliasing_ppgtt(dev);
 -      drm_mm_takedown(&dev_priv->mm.gtt_space);
 +      drm_mm_takedown(&dev_priv->gtt.base.mm);
  cleanup_irq:
        drm_irq_uninstall(dev);
  cleanup_gem_stolen:
@@@ -1435,6 -1440,22 +1435,6 @@@ static void i915_dump_device_info(struc
  #undef SEP_COMMA
  }
  
 -/**
 - * intel_early_sanitize_regs - clean up BIOS state
 - * @dev: DRM device
 - *
 - * This function must be called before we do any I915_READ or I915_WRITE. Its
 - * purpose is to clean up any state left by the BIOS that may affect us when
 - * reading and/or writing registers.
 - */
 -static void intel_early_sanitize_regs(struct drm_device *dev)
 -{
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 -
 -      if (HAS_FPGA_DBG_UNCLAIMED(dev))
 -              I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 -}
 -
  /**
   * i915_driver_load - setup chip and create an initial config
   * @dev: DRM device
@@@ -1476,19 -1497,15 +1476,19 @@@ int i915_driver_load(struct drm_device 
  
        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->gpu_error.lock);
 -      spin_lock_init(&dev_priv->rps.lock);
 -      spin_lock_init(&dev_priv->gt_lock);
        spin_lock_init(&dev_priv->backlight.lock);
 +      spin_lock_init(&dev_priv->uncore.lock);
 +      spin_lock_init(&dev_priv->mm.object_stat_lock);
        mutex_init(&dev_priv->dpio_lock);
        mutex_init(&dev_priv->rps.hw_lock);
        mutex_init(&dev_priv->modeset_restore_lock);
  
        i915_dump_device_info(dev_priv);
  
 +      INIT_LIST_HEAD(&dev_priv->vm_list);
 +      INIT_LIST_HEAD(&dev_priv->gtt.base.global_link);
 +      list_add(&dev_priv->gtt.base.global_link, &dev_priv->vm_list);
 +
        if (i915_get_bridge_dev(dev)) {
                ret = -EIO;
                goto free_priv;
                goto put_bridge;
        }
  
 -      intel_early_sanitize_regs(dev);
 +      intel_uncore_early_sanitize(dev);
 +
 +      if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
 +              /* The docs do not explain exactly how the calculation can be
 +               * made. It is somewhat guessable, but for now, it's always
 +               * 128MB.
 +               * NB: We can't write IDICR yet because we do not have gt funcs
 +               * set up */
 +              dev_priv->ellc_size = 128;
 +              DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
 +      }
  
        ret = i915_gem_gtt_init(dev);
        if (ret)
                goto out_rmmap;
        }
  
 -      dev_priv->mm.gtt_mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
 -                                               aperture_size);
 +      dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
 +                                            aperture_size);
  
        /* The i915 workqueue is primarily used for batched retirement of
         * requests (and thus managing bo) once the task has been completed
        intel_detect_pch(dev);
  
        intel_irq_init(dev);
 -      intel_gt_sanitize(dev);
 -      intel_gt_init(dev);
 +      intel_pm_init(dev);
 +      intel_uncore_sanitize(dev);
 +      intel_uncore_init(dev);
  
        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev);
                        goto out_gem_unload;
        }
  
 -      /* Start out suspended */
 -      dev_priv->mm.suspended = 1;
 -
        if (HAS_POWER_WELL(dev))
                i915_init_power_well(dev);
  
                        DRM_ERROR("failed to init modeset\n");
                        goto out_gem_unload;
                }
 +      } else {
 +              /* Start out suspended in ums mode. */
 +              dev_priv->ums.mm_suspended = 1;
        }
  
        i915_setup_sysfs(dev);
        if (INTEL_INFO(dev)->num_pipes) {
                /* Must be done after probing outputs */
                intel_opregion_init(dev);
-               acpi_video_register_with_quirks();
+               acpi_video_register();
        }
  
        if (IS_GEN5(dev))
@@@ -1662,9 -1668,9 +1662,9 @@@ out_gem_unload
        intel_teardown_mchbar(dev);
        destroy_workqueue(dev_priv->wq);
  out_mtrrfree:
 -      arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
 +      arch_phys_wc_del(dev_priv->gtt.mtrr);
        io_mapping_free(dev_priv->gtt.mappable);
 -      dev_priv->gtt.gtt_remove(dev);
 +      dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
  out_rmmap:
        pci_iounmap(dev->pdev, dev_priv->regs);
  put_bridge:
@@@ -1700,7 -1706,7 +1700,7 @@@ int i915_driver_unload(struct drm_devic
        cancel_delayed_work_sync(&dev_priv->mm.retire_work);
  
        io_mapping_free(dev_priv->gtt.mappable);
 -      arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
 +      arch_phys_wc_del(dev_priv->gtt.mtrr);
  
        acpi_video_unregister();
  
                        i915_free_hws(dev);
        }
  
 -      drm_mm_takedown(&dev_priv->mm.gtt_space);
 +      list_del(&dev_priv->gtt.base.global_link);
 +      WARN_ON(!list_empty(&dev_priv->vm_list));
 +      drm_mm_takedown(&dev_priv->gtt.base.mm);
        if (dev_priv->regs != NULL)
                pci_iounmap(dev->pdev, dev_priv->regs);
  
        destroy_workqueue(dev_priv->wq);
        pm_qos_remove_request(&dev_priv->pm_qos);
  
 -      dev_priv->gtt.gtt_remove(dev);
 +      dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
  
        if (dev_priv->slab)
                kmem_cache_destroy(dev_priv->slab);
@@@ -1837,7 -1841,7 +1837,7 @@@ void i915_driver_postclose(struct drm_d
        kfree(file_priv);
  }
  
 -struct drm_ioctl_desc i915_ioctls[] = {
 +const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
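For context on the vm_list bookkeeping added to i915_driver_load()/unload() above: it is the standard kernel embedded-list pattern, with the GTT's address space carrying a list_head that is threaded onto a per-device list of VMs. An abridged sketch (the struct is trimmed to the two fields this diff touches):

    struct i915_address_space {
            struct drm_mm mm;               /* torn down via drm_mm_takedown() above */
            struct list_head global_link;   /* link onto dev_priv->vm_list */
            /* ... */
    };

    /* load: the global GTT becomes the first entry on the VM list */
    INIT_LIST_HEAD(&dev_priv->vm_list);
    INIT_LIST_HEAD(&dev_priv->gtt.base.global_link);
    list_add(&dev_priv->gtt.base.global_link, &dev_priv->vm_list);

    /* unload: every VM must be unlinked before teardown */
    list_del(&dev_priv->gtt.base.global_link);
    WARN_ON(!list_empty(&dev_priv->vm_list));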
diff --combined drivers/gpu/drm/i915/intel_hdmi.c
index af18da76c04b0ceae107c0294ac41577978420b1,2fd3fd5b943ee57aff93415c707798d9069cea7e..044d11d0594423bc47b59317f079999a8bb958e2
@@@ -785,10 -785,22 +785,22 @@@ static void intel_disable_hdmi(struct i
        }
  }
  
+ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+ {
+       struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+       if (IS_G4X(dev))
+               return 165000;
+       else if (IS_HASWELL(dev))
+               return 300000;
+       else
+               return 225000;
+ }
  static int intel_hdmi_mode_valid(struct drm_connector *connector,
                                 struct drm_display_mode *mode)
  {
-       if (mode->clock > 165000)
+       if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
                return MODE_CLOCK_HIGH;
        if (mode->clock < 20000)
                return MODE_CLOCK_LOW;
@@@ -806,6 -818,7 +818,7 @@@ bool intel_hdmi_compute_config(struct i
        struct drm_device *dev = encoder->base.dev;
        struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
        int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
+       int portclock_limit = hdmi_portclock_limit(intel_hdmi);
        int desired_bpp;
  
        if (intel_hdmi->color_range_auto) {
         * outputs. We also need to check that the higher clock still fits
         * within limits.
         */
-       if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= 225000
+       if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
            && HAS_PCH_SPLIT(dev)) {
                DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
                desired_bpp = 12*3;
                pipe_config->pipe_bpp = desired_bpp;
        }
  
-       if (adjusted_mode->clock > 225000) {
+       if (adjusted_mode->clock > portclock_limit) {
                DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
                return false;
        }
@@@ -866,9 -879,6 +879,9 @@@ intel_hdmi_detect(struct drm_connector 
        struct edid *edid;
        enum drm_connector_status status = connector_status_disconnected;
  
 +      DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
 +                    connector->base.id, drm_get_connector_name(connector));
 +
        intel_hdmi->has_hdmi_sink = false;
        intel_hdmi->has_audio = false;
        intel_hdmi->rgb_quant_range_selectable = false;
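The port-clock arithmetic introduced above deserves a worked example (the numbers are illustrative; the limits come from hdmi_portclock_limit() in this diff):

    int clock = 148500;                /* 1080p60 pixel clock, in kHz */
    int clock_12bpc = clock * 3 / 2;   /* 222750 kHz */
    /* 222750 <= 225000, so a PCH-split platform may pick 12 bpc here.
     * At clock = 165000, clock_12bpc = 247500 kHz: over the 225000 kHz
     * pre-Haswell limit, so the driver keeps 8 bpc there, while
     * Haswell's 300000 kHz limit still accommodates it. */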
diff --combined drivers/gpu/drm/qxl/qxl_drv.h
index afd09d48d72ccbe2262229cb1849af3c811be369,7e96f4f117384faea9bad6b29826888f91973cf3..4708621fe720b8f1624f44b1f4233a0bbe2317e4
@@@ -42,6 -42,9 +42,9 @@@
  #include <ttm/ttm_placement.h>
  #include <ttm/ttm_module.h>
  
+ /* just for ttm_validate_buffer */
+ #include <ttm/ttm_execbuf_util.h>
  #include <drm/qxl_drm.h>
  #include "qxl_dev.h"
  
@@@ -118,9 -121,9 +121,9 @@@ struct qxl_bo 
        uint32_t surface_id;
        struct qxl_fence fence; /* per bo fence  - list of releases */
        struct qxl_release *surf_create;
-       atomic_t reserve_count;
  };
  #define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
+ #define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
  
  struct qxl_gem {
        struct mutex            mutex;
  };
  
  struct qxl_bo_list {
-       struct list_head lhead;
-       struct qxl_bo *bo;
- };
- struct qxl_reloc_list {
-       struct list_head bos;
+       struct ttm_validate_buffer tv;
  };
  
  struct qxl_crtc {
@@@ -195,10 -193,20 +193,20 @@@ enum 
  struct qxl_release {
        int id;
        int type;
-       int bo_count;
        uint32_t release_offset;
        uint32_t surface_release_id;
-       struct qxl_bo *bos[QXL_MAX_RES];
+       struct ww_acquire_ctx ticket;
+       struct list_head bos;
+ };
+ struct qxl_drm_chunk {
+       struct list_head head;
+       struct qxl_bo *bo;
+ };
+ struct qxl_drm_image {
+       struct qxl_bo *bo;
+       struct list_head chunk_list;
  };
  
  struct qxl_fb_image {
@@@ -314,12 -322,13 +322,13 @@@ struct qxl_device 
        struct workqueue_struct *gc_queue;
        struct work_struct gc_work;
  
+       struct work_struct fb_work;
  };
  
  /* forward declaration for QXL_INFO_IO */
  void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
  
 -extern struct drm_ioctl_desc qxl_ioctls[];
 +extern const struct drm_ioctl_desc qxl_ioctls[];
  extern int qxl_max_ioctl;
  
  int qxl_driver_load(struct drm_device *dev, unsigned long flags);
@@@ -418,6 -427,9 +427,6 @@@ int qxl_bo_kmap(struct qxl_bo *bo, voi
  int qxl_mode_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
 -int qxl_mode_dumb_destroy(struct drm_file *file_priv,
 -                        struct drm_device *dev,
 -                        uint32_t handle);
  int qxl_mode_dumb_mmap(struct drm_file *filp,
                       struct drm_device *dev,
                       uint32_t handle, uint64_t *offset_p);
@@@ -430,12 -442,19 +439,19 @@@ int qxl_mmap(struct file *filp, struct 
  
  /* qxl image */
  
- int qxl_image_create(struct qxl_device *qdev,
-                    struct qxl_release *release,
-                    struct qxl_bo **image_bo,
-                    const uint8_t *data,
-                    int x, int y, int width, int height,
-                    int depth, int stride);
+ int qxl_image_init(struct qxl_device *qdev,
+                  struct qxl_release *release,
+                  struct qxl_drm_image *dimage,
+                  const uint8_t *data,
+                  int x, int y, int width, int height,
+                  int depth, int stride);
+ int
+ qxl_image_alloc_objects(struct qxl_device *qdev,
+                       struct qxl_release *release,
+                       struct qxl_drm_image **image_ptr,
+                       int height, int stride);
+ void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage);
  void qxl_update_screen(struct qxl_device *qxl);
  
  /* qxl io operations (qxl_cmd.c) */
@@@ -456,20 -475,15 +472,15 @@@ int qxl_ring_push(struct qxl_ring *ring
  void qxl_io_flush_release(struct qxl_device *qdev);
  void qxl_io_flush_surfaces(struct qxl_device *qdev);
  
- int qxl_release_reserve(struct qxl_device *qdev,
-                       struct qxl_release *release, bool no_wait);
- void qxl_release_unreserve(struct qxl_device *qdev,
-                          struct qxl_release *release);
  union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
                                        struct qxl_release *release);
  void qxl_release_unmap(struct qxl_device *qdev,
                       struct qxl_release *release,
                       union qxl_release_info *info);
- /*
-  * qxl_bo_add_resource.
-  *
-  */
- void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource);
+ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo);
+ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr);
+ void qxl_release_backoff_reserve_list(struct qxl_release *release);
+ void qxl_release_fence_buffer_objects(struct qxl_release *release);
  
  int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
                                       enum qxl_surface_cmd_type surface_cmd_type,
  int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
                               int type, struct qxl_release **release,
                               struct qxl_bo **rbo);
- int qxl_fence_releaseable(struct qxl_device *qdev,
-                         struct qxl_release *release);
  int
  qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
                              uint32_t type, bool interruptible);
  int
  qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
                             uint32_t type, bool interruptible);
- int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+ int qxl_alloc_bo_reserved(struct qxl_device *qdev,
+                         struct qxl_release *release,
+                         unsigned long size,
                          struct qxl_bo **_bo);
  /* qxl drawing commands */
  
@@@ -507,15 -522,9 +519,9 @@@ void qxl_draw_copyarea(struct qxl_devic
                       u32 sx, u32 sy,
                       u32 dx, u32 dy);
  
- uint64_t
- qxl_release_alloc(struct qxl_device *qdev, int type,
-                 struct qxl_release **ret);
  void qxl_release_free(struct qxl_device *qdev,
                      struct qxl_release *release);
- void qxl_release_add_res(struct qxl_device *qdev,
-                        struct qxl_release *release,
-                        struct qxl_bo *bo);
  /* used by qxl_debugfs_release */
  struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
                                                   uint64_t id);
@@@ -558,7 -567,7 +564,7 @@@ void qxl_surface_evict(struct qxl_devic
  int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
  
  /* qxl_fence.c */
- int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id);
+ void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id);
  int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
  int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
  void qxl_fence_fini(struct qxl_fence *qfence);
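The new to_qxl_bo() macro above is the usual container_of() idiom: it maps a ttm_buffer_object embedded by value back to its wrapping qxl_bo. A minimal sketch (example_to_qxl_bo is a hypothetical name; the struct is abridged):

    struct qxl_bo {
            struct ttm_buffer_object tbo;   /* embedded by value, not a pointer */
            /* ... */
    };

    static struct qxl_bo *example_to_qxl_bo(struct ttm_buffer_object *tbo)
    {
            /* subtract the member offset to recover the containing qxl_bo;
             * exactly what to_qxl_bo() expands to */
            return container_of(tbo, struct qxl_bo, tbo);
    }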
diff --combined drivers/gpu/drm/qxl/qxl_ioctl.c
index 6cd7273c08041d12b5b394199ad597bbfd8da9dc,6de33563d6f186fe56745137a7e8b3c3b324e5a8..7b95c75e9626ae1324dea48b310d3052f0c00d2d
@@@ -68,55 -68,60 +68,60 @@@ static int qxl_map_ioctl(struct drm_dev
                                  &qxl_map->offset);
  }
  
+ struct qxl_reloc_info {
+       int type;
+       struct qxl_bo *dst_bo;
+       uint32_t dst_offset;
+       struct qxl_bo *src_bo;
+       int src_offset;
+ };
  /*
   * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
   * are on vram).
   * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
   */
  static void
- apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
-           struct qxl_bo *src, uint64_t src_off)
+ apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
  {
        void *reloc_page;
-       reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
-       *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
-                                                                    src, src_off);
-       qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+       reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
+       *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
+                                                                                             info->src_bo,
+                                                                                             info->src_offset);
+       qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
  }
  
  static void
- apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
-                struct qxl_bo *src)
+ apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
  {
        uint32_t id = 0;
        void *reloc_page;
  
-       if (src && !src->is_primary)
-               id = src->surface_id;
+       if (info->src_bo && !info->src_bo->is_primary)
+               id = info->src_bo->surface_id;
  
-       reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
-       *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id;
-       qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+       reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
+       *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
+       qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
  }
  
  /* return holding the reference to this object */
  static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
                                         struct drm_file *file_priv, uint64_t handle,
-                                        struct qxl_reloc_list *reloc_list)
+                                        struct qxl_release *release)
  {
        struct drm_gem_object *gobj;
        struct qxl_bo *qobj;
        int ret;
  
        gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
-       if (!gobj) {
-               DRM_ERROR("bad bo handle %lld\n", handle);
+       if (!gobj)
                return NULL;
-       }
        qobj = gem_to_qxl_bo(gobj);
  
-       ret = qxl_bo_list_add(reloc_list, qobj);
+       ret = qxl_release_list_add(release, qobj);
        if (ret)
                return NULL;
  
   * However, the command as passed from user space must *not* contain the initial
   * QXLReleaseInfo struct (first XXX bytes)
   */
- static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
-                               struct drm_file *file_priv)
+ static int qxl_process_single_command(struct qxl_device *qdev,
+                                     struct drm_qxl_command *cmd,
+                                     struct drm_file *file_priv)
  {
-       struct qxl_device *qdev = dev->dev_private;
-       struct drm_qxl_execbuffer *execbuffer = data;
-       struct drm_qxl_command user_cmd;
-       int cmd_num;
-       struct qxl_bo *reloc_src_bo;
-       struct qxl_bo *reloc_dst_bo;
-       struct drm_qxl_reloc reloc;
+       struct qxl_reloc_info *reloc_info;
+       int release_type;
+       struct qxl_release *release;
+       struct qxl_bo *cmd_bo;
        void *fb_cmd;
-       int i, ret;
-       struct qxl_reloc_list reloc_list;
+       int i, j, ret, num_relocs;
        int unwritten;
-       uint32_t reloc_dst_offset;
-       INIT_LIST_HEAD(&reloc_list.bos);
  
-       for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
-               struct qxl_release *release;
-               struct qxl_bo *cmd_bo;
-               int release_type;
-               struct drm_qxl_command *commands =
-                       (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
+       switch (cmd->type) {
+       case QXL_CMD_DRAW:
+               release_type = QXL_RELEASE_DRAWABLE;
+               break;
+       case QXL_CMD_SURFACE:
+       case QXL_CMD_CURSOR:
+       default:
+               DRM_DEBUG("Only draw commands in execbuffers\n");
+               return -EINVAL;
+               break;
+       }
  
-               if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
-                                      sizeof(user_cmd)))
-                       return -EFAULT;
-               switch (user_cmd.type) {
-               case QXL_CMD_DRAW:
-                       release_type = QXL_RELEASE_DRAWABLE;
-                       break;
-               case QXL_CMD_SURFACE:
-               case QXL_CMD_CURSOR:
-               default:
-                       DRM_DEBUG("Only draw commands in execbuffers\n");
-                       return -EINVAL;
-                       break;
-               }
+       if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
+               return -EINVAL;
  
-               if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
-                       return -EINVAL;
+       if (!access_ok(VERIFY_READ,
+                      (void *)(unsigned long)cmd->command,
+                      cmd->command_size))
+               return -EFAULT;
  
-               if (!access_ok(VERIFY_READ,
-                              (void *)(unsigned long)user_cmd.command,
-                              user_cmd.command_size))
-                       return -EFAULT;
+       reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
+       if (!reloc_info)
+               return -ENOMEM;
  
-               ret = qxl_alloc_release_reserved(qdev,
-                                                sizeof(union qxl_release_info) +
-                                                user_cmd.command_size,
-                                                release_type,
-                                                &release,
-                                                &cmd_bo);
-               if (ret)
-                       return ret;
+       ret = qxl_alloc_release_reserved(qdev,
+                                        sizeof(union qxl_release_info) +
+                                        cmd->command_size,
+                                        release_type,
+                                        &release,
+                                        &cmd_bo);
+       if (ret)
+               goto out_free_reloc;
  
-               /* TODO copy slow path code from i915 */
-               fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
-               unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size);
+       /* TODO copy slow path code from i915 */
+       fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
+       unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
  
-               {
-                       struct qxl_drawable *draw = fb_cmd;
+       {
+               struct qxl_drawable *draw = fb_cmd;
+               draw->mm_time = qdev->rom->mm_clock;
+       }
  
-                       draw->mm_time = qdev->rom->mm_clock;
-               }
-               qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
-               if (unwritten) {
-                       DRM_ERROR("got unwritten %d\n", unwritten);
-                       qxl_release_unreserve(qdev, release);
-                       qxl_release_free(qdev, release);
-                       return -EFAULT;
+       qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
+       if (unwritten) {
+               DRM_ERROR("got unwritten %d\n", unwritten);
+               ret = -EFAULT;
+               goto out_free_release;
+       }
+       /* fill out reloc info structs */
+       num_relocs = 0;
+       for (i = 0; i < cmd->relocs_num; ++i) {
+               struct drm_qxl_reloc reloc;
+               if (DRM_COPY_FROM_USER(&reloc,
+                                      &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
+                                      sizeof(reloc))) {
+                       ret = -EFAULT;
+                       goto out_free_bos;
                }
  
-               for (i = 0 ; i < user_cmd.relocs_num; ++i) {
-                       if (DRM_COPY_FROM_USER(&reloc,
-                                              &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i],
-                                              sizeof(reloc))) {
-                               qxl_bo_list_unreserve(&reloc_list, true);
-                               qxl_release_unreserve(qdev, release);
-                               qxl_release_free(qdev, release);
-                               return -EFAULT;
-                       }
+               /* add the bos to the list of bos to validate -
+                  need to validate first then process relocs? */
+               if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
+                       DRM_DEBUG("unknown reloc type %d\n", reloc_info[i].type);
  
-                       /* add the bos to the list of bos to validate -
-                          need to validate first then process relocs? */
-                       if (reloc.dst_handle) {
-                               reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
-                                                                 reloc.dst_handle, &reloc_list);
-                               if (!reloc_dst_bo) {
-                                       qxl_bo_list_unreserve(&reloc_list, true);
-                                       qxl_release_unreserve(qdev, release);
-                                       qxl_release_free(qdev, release);
-                                       return -EINVAL;
-                               }
-                               reloc_dst_offset = 0;
-                       } else {
-                               reloc_dst_bo = cmd_bo;
-                               reloc_dst_offset = release->release_offset;
+                       ret = -EINVAL;
+                       goto out_free_bos;
+               }
+               reloc_info[i].type = reloc.reloc_type;
+               if (reloc.dst_handle) {
+                       reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
+                                                                 reloc.dst_handle, release);
+                       if (!reloc_info[i].dst_bo) {
+                               ret = -EINVAL;
+                               reloc_info[i].src_bo = NULL;
+                               goto out_free_bos;
                        }
-                       /* reserve and validate the reloc dst bo */
-                       if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
-                               reloc_src_bo =
-                                       qxlhw_handle_to_bo(qdev, file_priv,
-                                                          reloc.src_handle, &reloc_list);
-                               if (!reloc_src_bo) {
-                                       if (reloc_dst_bo != cmd_bo)
-                                               drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
-                                       qxl_bo_list_unreserve(&reloc_list, true);
-                                       qxl_release_unreserve(qdev, release);
-                                       qxl_release_free(qdev, release);
-                                       return -EINVAL;
-                               }
-                       } else
-                               reloc_src_bo = NULL;
-                       if (reloc.reloc_type == QXL_RELOC_TYPE_BO) {
-                               apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
-                                           reloc_src_bo, reloc.src_offset);
-                       } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
-                               apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
-                       } else {
-                               DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
-                               return -EINVAL;
+                       reloc_info[i].dst_offset = reloc.dst_offset;
+               } else {
+                       reloc_info[i].dst_bo = cmd_bo;
+                       reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
+               }
+               num_relocs++;
+               /* reserve and validate the reloc dst bo */
+               if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
+                       reloc_info[i].src_bo =
+                               qxlhw_handle_to_bo(qdev, file_priv,
+                                                  reloc.src_handle, release);
+                       if (!reloc_info[i].src_bo) {
+                               if (reloc_info[i].dst_bo != cmd_bo)
+                                       drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base);
+                               ret = -EINVAL;
+                               goto out_free_bos;
                        }
+                       reloc_info[i].src_offset = reloc.src_offset;
+               } else {
+                       reloc_info[i].src_bo = NULL;
+                       reloc_info[i].src_offset = 0;
+               }
+       }
  
-                       if (reloc_src_bo && reloc_src_bo != cmd_bo) {
-                               qxl_release_add_res(qdev, release, reloc_src_bo);
-                               drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base);
-                       }
+       /* validate all buffers */
+       ret = qxl_release_reserve_list(release, false);
+       if (ret)
+               goto out_free_bos;
  
-                       if (reloc_dst_bo != cmd_bo)
-                               drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
-               }
-               qxl_fence_releaseable(qdev, release);
+       for (i = 0; i < cmd->relocs_num; ++i) {
+               if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
+                       apply_reloc(qdev, &reloc_info[i]);
+               else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
+                       apply_surf_reloc(qdev, &reloc_info[i]);
+       }
+       ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
+       if (ret)
+               qxl_release_backoff_reserve_list(release);
+       else
+               qxl_release_fence_buffer_objects(release);
+ out_free_bos:
+       for (j = 0; j < num_relocs; j++) {
+               if (reloc_info[j].dst_bo != cmd_bo)
+                       drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base);
+               if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo)
+                       drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base);
+       }
+ out_free_release:
+       if (ret)
+               qxl_release_free(qdev, release);
+ out_free_reloc:
+       kfree(reloc_info);
+       return ret;
+ }
+ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+ {
+       struct qxl_device *qdev = dev->dev_private;
+       struct drm_qxl_execbuffer *execbuffer = data;
+       struct drm_qxl_command user_cmd;
+       int cmd_num;
+       int ret;
+       for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
  
-               ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true);
-               if (ret == -ERESTARTSYS) {
-                       qxl_release_unreserve(qdev, release);
-                       qxl_release_free(qdev, release);
-                       qxl_bo_list_unreserve(&reloc_list, true);
+               struct drm_qxl_command *commands =
+                       (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
+               if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
+                                      sizeof(user_cmd)))
+                       return -EFAULT;
+               ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
+               if (ret)
                        return ret;
-               }
-               qxl_release_unreserve(qdev, release);
        }
-       qxl_bo_list_unreserve(&reloc_list, 0);
        return 0;
  }
  
@@@ -305,7 -336,7 +336,7 @@@ static int qxl_update_area_ioctl(struc
                goto out;
  
        if (!qobj->pin_count) {
-               qxl_ttm_placement_from_domain(qobj, qobj->type);
+               qxl_ttm_placement_from_domain(qobj, qobj->type, false);
                ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
                                      true, false);
                if (unlikely(ret))
@@@ -402,7 -433,7 +433,7 @@@ static int qxl_alloc_surf_ioctl(struct 
        return ret;
  }
  
 -struct drm_ioctl_desc qxl_ioctls[] = {
 +const struct drm_ioctl_desc qxl_ioctls[] = {
        DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
  
        DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
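Restated for review, the control flow the factored-out qxl_process_single_command() now follows (a summary sketch; every name appears in the diff above):

    /* 1. check cmd->type and bounds-check cmd->command_size
     * 2. qxl_alloc_release_reserved()  - allocate release + command bo
     * 3. copy the user command into cmd_bo via the atomic kmap fast path
     * 4. per reloc: copy drm_qxl_reloc, resolve handles through
     *    qxlhw_handle_to_bo(), which also does qxl_release_list_add()
     * 5. qxl_release_reserve_list()    - reserve and validate every bo
     * 6. apply_reloc() / apply_surf_reloc() using the saved reloc_info[]
     * 7. qxl_push_command_ring_release()
     * 8. on success qxl_release_fence_buffer_objects(); on failure
     *    qxl_release_backoff_reserve_list() and free the release
     */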
diff --combined drivers/gpu/drm/qxl/qxl_object.h
index af10165adb0d49570980fc849b312d0abb9f24a0,8cb6167038e544625ec666a6fef7b4308d8e832d..d458a140c02407c01858f2d7b8a212e80417db3a
@@@ -59,7 -59,7 +59,7 @@@ static inline unsigned long qxl_bo_size
  
  static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
  {
 -      return bo->tbo.addr_space_offset;
 +      return drm_vma_node_offset_addr(&bo->tbo.vma_node);
  }
  
  static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
@@@ -88,7 -88,7 +88,7 @@@
  
  extern int qxl_bo_create(struct qxl_device *qdev,
                         unsigned long size,
-                        bool kernel, u32 domain,
+                        bool kernel, bool pinned, u32 domain,
                         struct qxl_surface *surf,
                         struct qxl_bo **bo_ptr);
  extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
@@@ -99,9 -99,7 +99,7 @@@ extern struct qxl_bo *qxl_bo_ref(struc
  extern void qxl_bo_unref(struct qxl_bo **bo);
  extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
  extern int qxl_bo_unpin(struct qxl_bo *bo);
- extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain);
+ extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
  extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
  
- extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo);
- extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed);
  #endif
diff --combined drivers/gpu/drm/qxl/qxl_release.c
index 1a648e1da6a6ac7f295fc6418e5ad8873b08236a,b61449e52cd5695c7c024548ecedfc3340b11156..0109a9644cb29ef7a6e13ac76a90544effcefd20
@@@ -38,7 -38,8 +38,8 @@@
  
  static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
  static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
- uint64_t
+ static uint64_t
  qxl_release_alloc(struct qxl_device *qdev, int type,
                  struct qxl_release **ret)
  {
@@@ -53,9 -54,9 +54,9 @@@
                return 0;
        }
        release->type = type;
-       release->bo_count = 0;
        release->release_offset = 0;
        release->surface_release_id = 0;
+       INIT_LIST_HEAD(&release->bos);
  
        idr_preload(GFP_KERNEL);
        spin_lock(&qdev->release_idr_lock);
@@@ -77,20 -78,20 +78,20 @@@ voi
  qxl_release_free(struct qxl_device *qdev,
                 struct qxl_release *release)
  {
-       int i;
-       QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id,
-                release->type, release->bo_count);
+       struct qxl_bo_list *entry, *tmp;
+       QXL_INFO(qdev, "release %d, type %d\n", release->id,
+                release->type);
  
        if (release->surface_release_id)
                qxl_surface_id_dealloc(qdev, release->surface_release_id);
  
-       for (i = 0 ; i < release->bo_count; ++i) {
+       list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
+               struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
                QXL_INFO(qdev, "release %llx\n",
-                       drm_vma_node_offset_addr(&release->bos[i]->tbo.vma_node)
 -                      entry->tv.bo->addr_space_offset
++                      drm_vma_node_offset_addr(&entry->tv.bo->vma_node)
                                                - DRM_FILE_OFFSET);
-               qxl_fence_remove_release(&release->bos[i]->fence, release->id);
-               qxl_bo_unref(&release->bos[i]);
+               qxl_fence_remove_release(&bo->fence, release->id);
+               qxl_bo_unref(&bo);
        }
        spin_lock(&qdev->release_idr_lock);
        idr_remove(&qdev->release_idr, release->id);
        kfree(release);
  }
  
- void
- qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release,
-                   struct qxl_bo *bo)
- {
-       int i;
-       for (i = 0; i < release->bo_count; i++)
-               if (release->bos[i] == bo)
-                       return;
-       if (release->bo_count >= QXL_MAX_RES) {
-               DRM_ERROR("exceeded max resource on a qxl_release item\n");
-               return;
-       }
-       release->bos[release->bo_count++] = qxl_bo_ref(bo);
- }
  static int qxl_release_bo_alloc(struct qxl_device *qdev,
                                struct qxl_bo **bo)
  {
        int ret;
-       ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL,
+       /* pin releases bo's they are too messy to evict */
+       ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
+                           QXL_GEM_DOMAIN_VRAM, NULL,
                            bo);
        return ret;
  }
  
- int qxl_release_reserve(struct qxl_device *qdev,
-                       struct qxl_release *release, bool no_wait)
+ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
+ {
+       struct qxl_bo_list *entry;
+       list_for_each_entry(entry, &release->bos, tv.head) {
+               if (entry->tv.bo == &bo->tbo)
+                       return 0;
+       }
+       entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+       qxl_bo_ref(bo);
+       entry->tv.bo = &bo->tbo;
+       list_add_tail(&entry->tv.head, &release->bos);
+       return 0;
+ }
+ static int qxl_release_validate_bo(struct qxl_bo *bo)
  {
        int ret;
-       if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) {
-               ret = qxl_bo_reserve(release->bos[0], no_wait);
+       if (!bo->pin_count) {
+               qxl_ttm_placement_from_domain(bo, bo->type, false);
+               ret = ttm_bo_validate(&bo->tbo, &bo->placement,
+                                     true, false);
                if (ret)
                        return ret;
        }
+       /* allocate a surface for reserved + validated buffers */
+       ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
+       if (ret)
+               return ret;
+       return 0;
+ }
+ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
+ {
+       int ret;
+       struct qxl_bo_list *entry;
+       /* if only one object on the release its the release itself
+          since these objects are pinned no need to reserve */
+       if (list_is_singular(&release->bos))
+               return 0;
+       ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos);
+       if (ret)
+               return ret;
+       list_for_each_entry(entry, &release->bos, tv.head) {
+               struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
+               ret = qxl_release_validate_bo(bo);
+               if (ret) {
+                       ttm_eu_backoff_reservation(&release->ticket, &release->bos);
+                       return ret;
+               }
+       }
        return 0;
  }
  
- void qxl_release_unreserve(struct qxl_device *qdev,
-                         struct qxl_release *release)
+ void qxl_release_backoff_reserve_list(struct qxl_release *release)
  {
-       if (atomic_dec_and_test(&release->bos[0]->reserve_count))
-               qxl_bo_unreserve(release->bos[0]);
+       /* if only one object on the release its the release itself
+          since these objects are pinned no need to reserve */
+       if (list_is_singular(&release->bos))
+               return;
+       ttm_eu_backoff_reservation(&release->ticket, &release->bos);
  }
  
  int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
                                       enum qxl_surface_cmd_type surface_cmd_type,
                                       struct qxl_release *create_rel,
                                       struct qxl_release **release)
  {
-       int ret;
        if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
                int idr_ret;
+               struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
                struct qxl_bo *bo;
                union qxl_release_info *info;
  
                /* stash the release after the create command */
                idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
-               bo = qxl_bo_ref(create_rel->bos[0]);
+               bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
  
                (*release)->release_offset = create_rel->release_offset + 64;
  
-               qxl_release_add_res(qdev, *release, bo);
+               qxl_release_list_add(*release, bo);
  
-               ret = qxl_release_reserve(qdev, *release, false);
-               if (ret) {
-                       DRM_ERROR("release reserve failed\n");
-                       goto out_unref;
-               }
                info = qxl_release_map(qdev, *release);
                info->id = idr_ret;
                qxl_release_unmap(qdev, *release, info);
  
- out_unref:
                qxl_bo_unref(&bo);
-               return ret;
+               return 0;
        }
  
        return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
@@@ -187,7 -222,7 +222,7 @@@ int qxl_alloc_release_reserved(struct q
  {
        struct qxl_bo *bo;
        int idr_ret;
-       int ret;
+       int ret = 0;
        union qxl_release_info *info;
        int cur_idx;
  
                        mutex_unlock(&qdev->release_mutex);
                        return ret;
                }
-               /* pin releases bo's they are too messy to evict */
-               ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false);
-               qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL);
-               qxl_bo_unreserve(qdev->current_release_bo[cur_idx]);
        }
  
        bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
        if (rbo)
                *rbo = bo;
  
-       qxl_release_add_res(qdev, *release, bo);
-       ret = qxl_release_reserve(qdev, *release, false);
        mutex_unlock(&qdev->release_mutex);
-       if (ret)
-               goto out_unref;
+       qxl_release_list_add(*release, bo);
  
        info = qxl_release_map(qdev, *release);
        info->id = idr_ret;
        qxl_release_unmap(qdev, *release, info);
  
- out_unref:
        qxl_bo_unref(&bo);
        return ret;
  }
  
- int qxl_fence_releaseable(struct qxl_device *qdev,
-                         struct qxl_release *release)
- {
-       int i, ret;
-       for (i = 0; i < release->bo_count; i++) {
-               if (!release->bos[i]->tbo.sync_obj)
-                       release->bos[i]->tbo.sync_obj = &release->bos[i]->fence;
-               ret = qxl_fence_add_release(&release->bos[i]->fence, release->id);
-               if (ret)
-                       return ret;
-       }
-       return 0;
- }
  struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
                                                   uint64_t id)
  {
                DRM_ERROR("failed to find id in release_idr\n");
                return NULL;
        }
-       if (release->bo_count < 1) {
-               DRM_ERROR("read a released resource with 0 bos\n");
-               return NULL;
-       }
        return release;
  }
  
@@@ -285,9 -294,12 +294,12 @@@ union qxl_release_info *qxl_release_map
  {
        void *ptr;
        union qxl_release_info *info;
-       struct qxl_bo *bo = release->bos[0];
+       struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
+       struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
  
        ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
+       if (!ptr)
+               return NULL;
        info = ptr + (release->release_offset & ~PAGE_SIZE);
        return info;
  }
@@@ -296,9 -308,51 +308,51 @@@ void qxl_release_unmap(struct qxl_devic
                       struct qxl_release *release,
                       union qxl_release_info *info)
  {
-       struct qxl_bo *bo = release->bos[0];
+       struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
+       struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
        void *ptr;
  
        ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
        qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
  }
+ void qxl_release_fence_buffer_objects(struct qxl_release *release)
+ {
+       struct ttm_validate_buffer *entry;
+       struct ttm_buffer_object *bo;
+       struct ttm_bo_global *glob;
+       struct ttm_bo_device *bdev;
+       struct ttm_bo_driver *driver;
+       struct qxl_bo *qbo;
+       /* if only one object on the release its the release itself
+          since these objects are pinned no need to reserve */
+       if (list_is_singular(&release->bos))
+               return;
+       bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
+       bdev = bo->bdev;
+       driver = bdev->driver;
+       glob = bo->glob;
+       spin_lock(&glob->lru_lock);
+       spin_lock(&bdev->fence_lock);
+       list_for_each_entry(entry, &release->bos, head) {
+               bo = entry->bo;
+               qbo = to_qxl_bo(bo);
+               if (!entry->bo->sync_obj)
+                       entry->bo->sync_obj = &qbo->fence;
+               qxl_fence_add_release_locked(&qbo->fence, release->id);
+               ttm_bo_add_to_lru(bo);
+               ww_mutex_unlock(&bo->resv->lock);
+               entry->reserved = false;
+       }
+       spin_unlock(&bdev->fence_lock);
+       spin_unlock(&glob->lru_lock);
+       ww_acquire_fini(&release->ticket);
+ }
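The release code above now follows the ticketed wound/wait reservation lifecycle from ttm_execbuf_util; in outline (a sketch of the calls used in this file):

    /* acquire:  ttm_eu_reserve_buffers(&release->ticket, &release->bos);
     * validate: ttm_bo_validate() each bo, backing off with
     *           ttm_eu_backoff_reservation() on any failure;
     * fence:    under the lru/fence locks attach the qxl_fence to each bo,
     *           ttm_bo_add_to_lru(), ww_mutex_unlock(&bo->resv->lock);
     * finish:   ww_acquire_fini(&release->ticket);
     * Single-bo releases are pinned, so reserve/backoff are skipped
     * (the list_is_singular() checks above). */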
diff --combined drivers/gpu/drm/radeon/atombios_dp.c
index c239739736db5f047e495af913b8e7334e9cc2f7,32501f6ec991169917af2446db4acbd37f8fbba0..3569d89b9e41c9c69250bffc8b75fce591b69653
@@@ -44,6 -44,41 +44,41 @@@ static char *pre_emph_names[] = 
  };
  
  /***** radeon AUX functions *****/
+ /* Atom needs data in little endian format
+  * so swap as appropriate when copying data to
+  * or from atom. Note that atom operates on
+  * dw units.
+  */
+ static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
+ {
+ #ifdef __BIG_ENDIAN
+       u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
+       u32 *dst32, *src32;
+       int i;
+       memcpy(src_tmp, src, num_bytes);
+       src32 = (u32 *)src_tmp;
+       dst32 = (u32 *)dst_tmp;
+       if (to_le) {
+               for (i = 0; i < ((num_bytes + 3) / 4); i++)
+                       dst32[i] = cpu_to_le32(src32[i]);
+               memcpy(dst, dst_tmp, num_bytes);
+       } else {
+               u8 dws = num_bytes & ~3;
+               for (i = 0; i < ((num_bytes + 3) / 4); i++)
+                       dst32[i] = le32_to_cpu(src32[i]);
+               memcpy(dst, dst_tmp, dws);
+               if (num_bytes % 4) {
+                       for (i = 0; i < (num_bytes % 4); i++)
+                               dst[dws+i] = dst_tmp[dws+i];
+               }
+       }
+ #else
+       memcpy(dst, src, num_bytes);
+ #endif
+ }
  union aux_channel_transaction {
        PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
        PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
@@@ -65,10 -100,10 +100,10 @@@ static int radeon_process_aux_ch(struc
  
        base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
  
-       memcpy(base, send, send_bytes);
+       radeon_copy_swap(base, send, send_bytes, true);
  
-       args.v1.lpAuxRequest = 0 + 4;
-       args.v1.lpDataOut = 16 + 4;
+       args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
+       args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
        args.v1.ucDataOutLen = 0;
        args.v1.ucChannelID = chan->rec.i2c_id;
        args.v1.ucDelay = delay / 10;
                recv_bytes = recv_size;
  
        if (recv && recv_size)
-               memcpy(recv, base + 16, recv_bytes);
+               radeon_copy_swap(recv, base + 16, recv_bytes, false);
  
        return recv_bytes;
  }
@@@ -550,7 -585,7 +585,7 @@@ static bool radeon_dp_get_link_status(s
                return false;
        }
  
 -      DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);
 +      DRM_DEBUG_KMS("link status %6ph\n", link_status);
        return true;
  }
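A worked example of radeon_copy_swap() on a big-endian host (illustrative bytes):

    /* to_le = true, num_bytes = 8
     * src bytes:            12 34 56 78  9a bc de f0
     * read as BE u32s:      0x12345678   0x9abcdef0
     * cpu_to_le32() stores: 78 56 34 12  f0 de bc 9a
     * Each 4-byte unit is reversed while dword order is preserved,
     * matching atom's dword-at-a-time little-endian view; on a
     * little-endian host the function is a plain memcpy(). */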
  
diff --combined drivers/gpu/drm/radeon/r600.c
index 319e1ee1844aa4ac128b64941d06ddaf23d0d97c,10f712e37003030e81a7770e94ca696c4c8f2c08..cfc1d28ade39e4d39329feee29dd8a63de410c66
@@@ -2413,8 -2413,8 +2413,8 @@@ int r600_cp_resume(struct radeon_devic
        WREG32(GRBM_SOFT_RESET, 0);
  
        /* Set ring buffer size */
 -      rb_bufsz = drm_order(ring->ring_size / 8);
 -      tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 +      rb_bufsz = order_base_2(ring->ring_size / 8);
 +      tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
  #ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
  #endif
@@@ -2467,7 -2467,7 +2467,7 @@@ void r600_ring_init(struct radeon_devic
        int r;
  
        /* Align ring size */
 -      rb_bufsz = drm_order(ring_size / 8);
 +      rb_bufsz = order_base_2(ring_size / 8);
        ring_size = (1 << (rb_bufsz + 1)) * 4;
        ring->ring_size = ring_size;
        ring->align_mask = 16 - 1;
@@@ -2547,7 -2547,7 +2547,7 @@@ int r600_dma_resume(struct radeon_devic
        WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
  
        /* Set ring buffer size in dwords */
 -      rb_bufsz = drm_order(ring->ring_size / 4);
 +      rb_bufsz = order_base_2(ring->ring_size / 4);
        rb_cntl = rb_bufsz << 1;
  #ifdef __BIG_ENDIAN
        rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
@@@ -2656,7 -2656,7 +2656,7 @@@ int r600_uvd_rbc_start(struct radeon_de
        WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
  
        /* Set ring buffer size */
 -      rb_bufsz = drm_order(ring->ring_size);
 +      rb_bufsz = order_base_2(ring->ring_size);
        rb_bufsz = (0x1 << 8) | rb_bufsz;
        WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
  
@@@ -3166,7 -3166,7 +3166,7 @@@ int r600_copy_cpdma(struct radeon_devic
  
        size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
        num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
-       r = radeon_ring_lock(rdev, ring, num_loops * 6 + 21);
+       r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                radeon_semaphore_free(rdev, &sem, NULL);
                radeon_semaphore_free(rdev, &sem, NULL);
        }
  
+       radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+       radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+       radeon_ring_write(ring, WAIT_3D_IDLE_bit);
        for (i = 0; i < num_loops; i++) {
                cur_size_in_bytes = size_in_bytes;
                if (cur_size_in_bytes > 0x1fffff)
@@@ -3812,7 -3815,7 +3815,7 @@@ void r600_ih_ring_init(struct radeon_de
        u32 rb_bufsz;
  
        /* Align ring size */
 -      rb_bufsz = drm_order(ring_size / 4);
 +      rb_bufsz = order_base_2(ring_size / 4);
        ring_size = (1 << rb_bufsz) * 4;
        rdev->ih.ring_size = ring_size;
        rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
@@@ -4049,7 -4052,7 +4052,7 @@@ int r600_irq_init(struct radeon_device 
        WREG32(INTERRUPT_CNTL, interrupt_cntl);
  
        WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
 -      rb_bufsz = drm_order(rdev->ih.ring_size / 4);
 +      rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
  
        ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
                      IH_WPTR_OVERFLOW_CLEAR |
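On the drm_order() -> order_base_2() conversions above: both return ceil(log2(n)), so the ring setup is unchanged. A quick check with example values (assuming the usual 4 KiB RADEON_GPU_PAGE_SIZE):

    /* ring->ring_size = 65536 -> 65536 / 8 = 8192 -> rb_bufsz = 13
     * RADEON_GPU_PAGE_SIZE / 8 = 4096 / 8 =   512 -> order_base_2 = 9
     * so tmp = (9 << 8) | 13 before the endian bits are OR'd in */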
diff --combined drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 5069d9c234bb71a5d9d59bcd63184f2c17240aa0,e113352bacdc9cf3a86ce081d3cf536f8e5537b1..0a9f1bb88337f614a24468970a09b6024415d359
@@@ -21,6 -21,7 +21,7 @@@
  
  #include <drm/drmP.h>
  #include <drm/drm_crtc_helper.h>
+ #include <drm/drm_fb_cma_helper.h>
  #include <drm/drm_gem_cma_helper.h>
  
  #include "rcar_du_crtc.h"
  #include "rcar_du_kms.h"
  #include "rcar_du_regs.h"
  
- /* -----------------------------------------------------------------------------
-  * Core device operations
-  */
- /*
-  * rcar_du_get - Acquire a reference to the DU
-  *
-  * Acquiring a reference enables the device clock and setup core registers. A
-  * reference must be held before accessing any hardware registers.
-  *
-  * This function must be called with the DRM mode_config lock held.
-  *
-  * Return 0 in case of success or a negative error code otherwise.
-  */
- int rcar_du_get(struct rcar_du_device *rcdu)
- {
-       int ret;
-       if (rcdu->use_count)
-               goto done;
-       /* Enable clocks before accessing the hardware. */
-       ret = clk_prepare_enable(rcdu->clock);
-       if (ret < 0)
-               return ret;
-       /* Enable extended features */
-       rcar_du_write(rcdu, DEFR, DEFR_CODE | DEFR_DEFE);
-       rcar_du_write(rcdu, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
-       rcar_du_write(rcdu, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
-       rcar_du_write(rcdu, DEFR4, DEFR4_CODE);
-       rcar_du_write(rcdu, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
-       /* Use DS1PR and DS2PR to configure plane priorities and connect
-        * superposition 0 to DU0 pins. DU1 pins will be configured dynamically.
-        */
-       rcar_du_write(rcdu, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
- done:
-       rcdu->use_count++;
-       return 0;
- }
- /*
-  * rcar_du_put - Release a reference to the DU
-  *
-  * Releasing the last reference disables the device clock.
-  *
-  * This function must be called with the DRM mode_config lock held.
-  */
- void rcar_du_put(struct rcar_du_device *rcdu)
- {
-       if (--rcdu->use_count)
-               return;
-       clk_disable_unprepare(rcdu->clock);
- }
  /* -----------------------------------------------------------------------------
   * DRM operations
   */
  
  static int rcar_du_unload(struct drm_device *dev)
  {
+       struct rcar_du_device *rcdu = dev->dev_private;
+ 
+       if (rcdu->fbdev)
+               drm_fbdev_cma_fini(rcdu->fbdev);
        drm_kms_helper_poll_fini(dev);
        drm_mode_config_cleanup(dev);
        drm_vblank_cleanup(dev);
-       drm_irq_uninstall(dev);
  
+       dev->irq_enabled = 0;
        dev->dev_private = NULL;
  
        return 0;
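The rcar_du_get()/rcar_du_put() pair removed above implemented a first-user/last-user pattern: the first reference enabled the clock and programmed the extended-feature registers, and the last release disabled the clock again. The series drops it in favour of per-CRTC clock handling (see RCAR_DU_FEATURE_CRTC_IRQ_CLOCK below). A minimal userspace sketch of the pattern, with illustrative names:

    #include <stdio.h>

    struct example_dev {
            unsigned int use_count;
    };

    static void example_get(struct example_dev *d)
    {
            if (d->use_count++ == 0)
                    printf("clock enabled\n");      /* clk_prepare_enable() here */
    }

    static void example_put(struct example_dev *d)
    {
            if (--d->use_count == 0)
                    printf("clock disabled\n");     /* clk_disable_unprepare() here */
    }

    int main(void)
    {
            struct example_dev dev = { .use_count = 0 };

            example_get(&dev);
            example_get(&dev);      /* no-op: already enabled */
            example_put(&dev);
            example_put(&dev);      /* last user: disables the clock */
            return 0;
    }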
@@@ -107,7 -55,6 +55,6 @@@ static int rcar_du_load(struct drm_devi
        struct platform_device *pdev = dev->platformdev;
        struct rcar_du_platform_data *pdata = pdev->dev.platform_data;
        struct rcar_du_device *rcdu;
-       struct resource *ioarea;
        struct resource *mem;
        int ret;
  
        rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
        if (rcdu == NULL)
                return -ENOMEM;
  
        rcdu->dev = &pdev->dev;
        rcdu->pdata = pdata;
+       rcdu->info = (struct rcar_du_device_info *)pdev->id_entry->driver_data;
        rcdu->ddev = dev;
        dev->dev_private = rcdu;
  
-       /* I/O resources and clocks */
+       /* I/O resources */
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (mem == NULL) {
-               dev_err(&pdev->dev, "failed to get memory resource\n");
-               return -EINVAL;
-       }
-       ioarea = devm_request_mem_region(&pdev->dev, mem->start,
-                                        resource_size(mem), pdev->name);
-       if (ioarea == NULL) {
-               dev_err(&pdev->dev, "failed to request memory region\n");
-               return -EBUSY;
-       }
-       rcdu->mmio = devm_ioremap_nocache(&pdev->dev, ioarea->start,
-                                         resource_size(ioarea));
-       if (rcdu->mmio == NULL) {
-               dev_err(&pdev->dev, "failed to remap memory resource\n");
-               return -ENOMEM;
-       }
-       rcdu->clock = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(rcdu->clock)) {
-               dev_err(&pdev->dev, "failed to get clock\n");
-               return -ENOENT;
-       }
+       rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(rcdu->mmio))
+               return PTR_ERR(rcdu->mmio);
  
        /* DRM/KMS objects */
        ret = rcar_du_modeset_init(rcdu);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to initialize DRM/KMS\n");
                goto done;
        }
  
-       /* IRQ and vblank handling */
+       /* vblank handling */
        ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to initialize vblank\n");
                goto done;
        }
  
-       ret = drm_irq_install(dev);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "failed to install IRQ handler\n");
-               goto done;
-       }
+       dev->irq_enabled = 1;
  
        platform_set_drvdata(pdev, rcdu);
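Two conversions in this hunk are worth noting. devm_ioremap_resource() requests and maps the memory region in one call and returns an ERR_PTR on any failure, including a NULL resource, so the explicit request/remap steps and their error messages can go. And with the driver now handling its own per-CRTC interrupts, dev->irq_enabled is set by hand so the DRM core's vblank paths still see interrupts as available. A hypothetical probe fragment showing the ioremap idiom:

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    /* Hypothetical probe; only the devm_ioremap_resource() idiom matters. */
    static int example_probe(struct platform_device *pdev)
    {
            struct resource *mem;
            void __iomem *mmio;

            mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            mmio = devm_ioremap_resource(&pdev->dev, mem);
            if (IS_ERR(mmio))
                    return PTR_ERR(mmio);   /* also covers mem == NULL */

            return 0;
    }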
  
@@@ -188,20 -111,15 +111,15 @@@ static void rcar_du_preclose(struct drm
        struct rcar_du_device *rcdu = dev->dev_private;
        unsigned int i;
  
-       for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i)
+       for (i = 0; i < rcdu->num_crtcs; ++i)
                rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file);
  }
  
- static irqreturn_t rcar_du_irq(int irq, void *arg)
+ static void rcar_du_lastclose(struct drm_device *dev)
  {
-       struct drm_device *dev = arg;
        struct rcar_du_device *rcdu = dev->dev_private;
-       unsigned int i;
-       for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i)
-               rcar_du_crtc_irq(&rcdu->crtcs[i]);
  
-       return IRQ_HANDLED;
+       drm_fbdev_cma_restore_mode(rcdu->fbdev);
  }
  
  static int rcar_du_enable_vblank(struct drm_device *dev, int crtc)
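The new lastclose hook pairs with the FBDEV emulation added by this series: when the last userspace master closes the device, drm_fbdev_cma_restore_mode() hands the display back to the fbdev console. A sketch of the three CMA fbdev touchpoints, assuming a driver that stores the drm_fbdev_cma pointer (the glue names and the 32 bpp choice are illustrative):

    #include <linux/err.h>
    #include <drm/drm_fb_cma_helper.h>

    /* Illustrative lifecycle glue: init at load, restore on lastclose,
     * fini at unload. */
    static int example_fbdev_init(struct drm_device *dev,
                                  struct drm_fbdev_cma **fbdev,
                                  unsigned int num_crtcs)
    {
            *fbdev = drm_fbdev_cma_init(dev, 32, num_crtcs, num_crtcs);
            return IS_ERR(*fbdev) ? PTR_ERR(*fbdev) : 0;
    }

    static void example_fbdev_lastclose(struct drm_fbdev_cma *fbdev)
    {
            drm_fbdev_cma_restore_mode(fbdev);
    }

    static void example_fbdev_fini(struct drm_fbdev_cma *fbdev)
    {
            if (fbdev)
                    drm_fbdev_cma_fini(fbdev);
    }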
@@@ -236,12 -154,11 +154,11 @@@ static const struct file_operations rca
  };
  
  static struct drm_driver rcar_du_driver = {
-       .driver_features        = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
-                               | DRIVER_PRIME,
+       .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
        .load                   = rcar_du_load,
        .unload                 = rcar_du_unload,
        .preclose               = rcar_du_preclose,
-       .irq_handler            = rcar_du_irq,
+       .lastclose              = rcar_du_lastclose,
        .get_vblank_counter     = drm_vblank_count,
        .enable_vblank          = rcar_du_enable_vblank,
        .disable_vblank         = rcar_du_disable_vblank,
        .gem_prime_mmap         = drm_gem_cma_prime_mmap,
        .dumb_create            = rcar_du_dumb_create,
        .dumb_map_offset        = drm_gem_cma_dumb_map_offset,
 -      .dumb_destroy           = drm_gem_cma_dumb_destroy,
 +      .dumb_destroy           = drm_gem_dumb_destroy,
        .fops                   = &rcar_du_fops,
        .name                   = "rcar-du",
        .desc                   = "Renesas R-Car Display Unit",
@@@ -313,6 -230,57 +230,57 @@@ static int rcar_du_remove(struct platfo
        return 0;
  }
  
+ static const struct rcar_du_device_info rcar_du_r8a7779_info = {
+       .features = 0,
+       .num_crtcs = 2,
+       .routes = {
+               /* R8A7779 has two RGB outputs and one (currently unsupported)
+                * TCON output.
+                */
+               [RCAR_DU_OUTPUT_DPAD0] = {
+                       .possible_crtcs = BIT(0),
+                       .encoder_type = DRM_MODE_ENCODER_NONE,
+               },
+               [RCAR_DU_OUTPUT_DPAD1] = {
+                       .possible_crtcs = BIT(1) | BIT(0),
+                       .encoder_type = DRM_MODE_ENCODER_NONE,
+               },
+       },
+       .num_lvds = 0,
+ };
+ 
+ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
+       .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_ALIGN_128B
+                 | RCAR_DU_FEATURE_DEFR8,
+       .num_crtcs = 3,
+       .routes = {
+               /* R8A7790 has one RGB output, two LVDS outputs and one
+                * (currently unsupported) TCON output.
+                */
+               [RCAR_DU_OUTPUT_DPAD0] = {
+                       .possible_crtcs = BIT(2) | BIT(1) | BIT(0),
+                       .encoder_type = DRM_MODE_ENCODER_NONE,
+               },
+               [RCAR_DU_OUTPUT_LVDS0] = {
+                       .possible_crtcs = BIT(0),
+                       .encoder_type = DRM_MODE_ENCODER_LVDS,
+               },
+               [RCAR_DU_OUTPUT_LVDS1] = {
+                       .possible_crtcs = BIT(2) | BIT(1),
+                       .encoder_type = DRM_MODE_ENCODER_LVDS,
+               },
+       },
+       .num_lvds = 2,
+ };
+ 
+ static const struct platform_device_id rcar_du_id_table[] = {
+       { "rcar-du-r8a7779", (kernel_ulong_t)&rcar_du_r8a7779_info },
+       { "rcar-du-r8a7790", (kernel_ulong_t)&rcar_du_r8a7790_info },
+       { }
+ };
+ MODULE_DEVICE_TABLE(platform, rcar_du_id_table);
+ 
  static struct platform_driver rcar_du_platform_driver = {
        .probe          = rcar_du_probe,
        .remove         = rcar_du_remove,
        .driver         = {
                .name   = "rcar-du",
                .pm     = &rcar_du_pm_ops,
        },
+       .id_table       = rcar_du_id_table,
  };
  
  module_platform_driver(rcar_du_platform_driver);
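The per-SoC rcar_du_device_info structures travel through the platform bus as driver_data: matching stores the winning id_table entry in pdev->id_entry, and the load path casts driver_data back to the info pointer, as in the rcdu->info assignment earlier in this diff. A hypothetical driver reduced to that round trip:

    #include <linux/mod_devicetable.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    /* Hypothetical per-SoC info; mirrors the pattern, not the real struct. */
    struct example_info {
            unsigned int num_crtcs;
    };

    static const struct example_info example_r8a7779_info = {
            .num_crtcs = 2,
    };

    static const struct platform_device_id example_id_table[] = {
            { "example-du-r8a7779", (kernel_ulong_t)&example_r8a7779_info },
            { }
    };
    MODULE_DEVICE_TABLE(platform, example_id_table);

    static int example_probe(struct platform_device *pdev)
    {
            /* The platform core set pdev->id_entry to the matched entry. */
            const struct example_info *info =
                    (const struct example_info *)pdev->id_entry->driver_data;

            return info->num_crtcs ? 0 : -ENODEV;
    }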