Merge remote branch 'korg/drm-fixes' into drm-vmware-next
author Dave Airlie <airlied@redhat.com>
Wed, 6 Oct 2010 01:10:48 +0000 (11:10 +1000)
committer Dave Airlie <airlied@redhat.com>
Wed, 6 Oct 2010 01:10:48 +0000 (11:10 +1000)
Necessary for some of the vmware fixes to be pushed in.

Conflicts:
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/i915/intel_fb.c
include/drm/drmP.h

MAINTAINERS
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
include/drm/drmP.h

diff --combined MAINTAINERS
index 534f4d0accb9064a064de6ab5ce6156d19ba5698,ceba39bc1b49cc9ee817ae5e97fe9ed9a551e1a7..5e1169df8c1f7aa67ef8eca21053d657b88c6798
@@@ -2051,15 -2051,6 +2051,15 @@@ S:    Maintaine
  F:    drivers/gpu/drm/
  F:    include/drm/
  
 +INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
 +M:    Chris Wilson <chris@chris-wilson.co.uk>
 +L:    intel-gfx@lists.freedesktop.org
 +L:    dri-devel@lists.freedesktop.org
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel.git
 +S:    Supported
 +F:    drivers/gpu/drm/i915
 +F:    include/drm/i915*
 +
  DSCC4 DRIVER
  M:    Francois Romieu <romieu@fr.zoreil.com>
  L:    netdev@vger.kernel.org
@@@ -2677,6 -2668,8 +2677,8 @@@ M:      Guenter Roeck <guenter.roeck@ericsso
  L:    lm-sensors@lm-sensors.org
  W:    http://www.lm-sensors.org/
  T:    quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
+ T:    quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
+ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
  S:    Maintained
  F:    Documentation/hwmon/
  F:    drivers/hwmon/
diff --combined drivers/gpu/drm/drm_gem.c
index 3ea0692ce59a984fb8e2145c15bae06386c5a93a,5663d2719063de9231ca6cc153b63b30422e17aa..ea1c4b019ebf96290c0768202d7277cda47ba0f9
@@@ -92,6 -92,12 +92,6 @@@ drm_gem_init(struct drm_device *dev
  
        spin_lock_init(&dev->object_name_lock);
        idr_init(&dev->object_name_idr);
 -      atomic_set(&dev->object_count, 0);
 -      atomic_set(&dev->object_memory, 0);
 -      atomic_set(&dev->pin_count, 0);
 -      atomic_set(&dev->pin_memory, 0);
 -      atomic_set(&dev->gtt_count, 0);
 -      atomic_set(&dev->gtt_memory, 0);
  
        mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
        if (!mm) {
@@@ -142,9 -148,12 +142,9 @@@ int drm_gem_object_init(struct drm_devi
                return -ENOMEM;
  
        kref_init(&obj->refcount);
-       kref_init(&obj->handlecount);
+       atomic_set(&obj->handle_count, 0);
        obj->size = size;
  
 -      atomic_inc(&dev->object_count);
 -      atomic_add(obj->size, &dev->object_memory);
 -
        return 0;
  }
  EXPORT_SYMBOL(drm_gem_object_init);
@@@ -171,6 -180,8 +171,6 @@@ drm_gem_object_alloc(struct drm_device 
        return obj;
  fput:
        /* Object_init mangles the global counters - readjust them. */
 -      atomic_dec(&dev->object_count);
 -      atomic_sub(obj->size, &dev->object_memory);
        fput(obj->filp);
  free:
        kfree(obj);
@@@ -425,7 -436,10 +425,7 @@@ drm_gem_release(struct drm_device *dev
  void
  drm_gem_object_release(struct drm_gem_object *obj)
  {
 -      struct drm_device *dev = obj->dev;
        fput(obj->filp);
 -      atomic_dec(&dev->object_count);
 -      atomic_sub(obj->size, &dev->object_memory);
  }
  EXPORT_SYMBOL(drm_gem_object_release);
  
@@@ -448,26 -462,6 +448,6 @@@ drm_gem_object_free(struct kref *kref
  }
  EXPORT_SYMBOL(drm_gem_object_free);
  
- /**
-  * Called after the last reference to the object has been lost.
-  * Must be called without holding struct_mutex
-  *
-  * Frees the object
-  */
- void
- drm_gem_object_free_unlocked(struct kref *kref)
- {
-       struct drm_gem_object *obj = (struct drm_gem_object *) kref;
-       struct drm_device *dev = obj->dev;
-       if (dev->driver->gem_free_object != NULL) {
-               mutex_lock(&dev->struct_mutex);
-               dev->driver->gem_free_object(obj);
-               mutex_unlock(&dev->struct_mutex);
-       }
- }
- EXPORT_SYMBOL(drm_gem_object_free_unlocked);
  static void drm_gem_object_ref_bug(struct kref *list_kref)
  {
        BUG();
   * called before drm_gem_object_free or we'll be touching
   * freed memory
   */
- void
- drm_gem_object_handle_free(struct kref *kref)
+ void drm_gem_object_handle_free(struct drm_gem_object *obj)
  {
-       struct drm_gem_object *obj = container_of(kref,
-                                                 struct drm_gem_object,
-                                                 handlecount);
        struct drm_device *dev = obj->dev;
  
        /* Remove any name for this object */
@@@ -512,6 -502,10 +488,10 @@@ void drm_gem_vm_open(struct vm_area_str
        struct drm_gem_object *obj = vma->vm_private_data;
  
        drm_gem_object_reference(obj);
+       mutex_lock(&obj->dev->struct_mutex);
+       drm_vm_open_locked(vma);
+       mutex_unlock(&obj->dev->struct_mutex);
  }
  EXPORT_SYMBOL(drm_gem_vm_open);
  
@@@ -519,7 -513,10 +499,10 @@@ void drm_gem_vm_close(struct vm_area_st
  {
        struct drm_gem_object *obj = vma->vm_private_data;
  
-       drm_gem_object_unreference_unlocked(obj);
+       mutex_lock(&obj->dev->struct_mutex);
+       drm_vm_close_locked(vma);
+       drm_gem_object_unreference(obj);
+       mutex_unlock(&obj->dev->struct_mutex);
  }
  EXPORT_SYMBOL(drm_gem_vm_close);
  
diff --combined drivers/gpu/drm/drm_info.c
index 5aff08e236cfc783008eec7c3cb43ff95aeca9bc,974e970ce3f81ce014170b90ad1b8adc8a1dd5a9..3cdbaf379bb51324110d5d0ace0c9dc2dd24059e
@@@ -255,7 -255,7 +255,7 @@@ int drm_gem_one_name_info(int id, void 
  
        seq_printf(m, "%6d %8zd %7d %8d\n",
                   obj->name, obj->size,
-                  atomic_read(&obj->handlecount.refcount),
+                  atomic_read(&obj->handle_count),
                   atomic_read(&obj->refcount.refcount));
        return 0;
  }
@@@ -270,6 -270,20 +270,6 @@@ int drm_gem_name_info(struct seq_file *
        return 0;
  }
  
 -int drm_gem_object_info(struct seq_file *m, void* data)
 -{
 -      struct drm_info_node *node = (struct drm_info_node *) m->private;
 -      struct drm_device *dev = node->minor->dev;
 -
 -      seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
 -      seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
 -      seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
 -      seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
 -      seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
 -      seq_printf(m, "%d gtt total\n", dev->gtt_total);
 -      return 0;
 -}
 -
  #if DRM_DEBUG_CODE
  
  int drm_vma_info(struct seq_file *m, void *data)
diff --combined drivers/gpu/drm/drm_vm.c
index ee879d6bb5225b5c7eeab06d4c6ece819b5f414a,5df450683aab8649511aaa96aaa759452b022fc0..2c3fcbdfd8ff64f8c5a53ff35884f59ce51bd621
@@@ -433,15 -433,7 +433,7 @@@ static void drm_vm_open(struct vm_area_
        mutex_unlock(&dev->struct_mutex);
  }
  
- /**
-  * \c close method for all virtual memory types.
-  *
-  * \param vma virtual memory area.
-  *
-  * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
-  * free it.
-  */
- static void drm_vm_close(struct vm_area_struct *vma)
+ void drm_vm_close_locked(struct vm_area_struct *vma)
  {
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);
  
-       mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        break;
                }
        }
+ }
+ /**
+  * \c close method for all virtual memory types.
+  *
+  * \param vma virtual memory area.
+  *
+  * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
+  * free it.
+  */
+ static void drm_vm_close(struct vm_area_struct *vma)
+ {
+       struct drm_file *priv = vma->vm_file->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       mutex_lock(&dev->struct_mutex);
+       drm_vm_close_locked(vma);
        mutex_unlock(&dev->struct_mutex);
  }
  
@@@ -515,7 -523,14 +523,7 @@@ static int drm_mmap_dma(struct file *fi
        return 0;
  }
  
 -resource_size_t drm_core_get_map_ofs(struct drm_local_map * map)
 -{
 -      return map->offset;
 -}
 -
 -EXPORT_SYMBOL(drm_core_get_map_ofs);
 -
 -resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
 +static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
  {
  #ifdef __alpha__
        return dev->hose->dense_mem_base - dev->hose->mem_space->start;
  #endif
  }
  
 -EXPORT_SYMBOL(drm_core_get_reg_ofs);
 -
  /**
   * mmap DMA memory.
   *
@@@ -610,7 -627,7 +618,7 @@@ int drm_mmap_locked(struct file *filp, 
  #endif
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
 -              offset = dev->driver->get_reg_ofs(dev);
 +              offset = drm_core_get_reg_ofs(dev);
                vma->vm_flags |= VM_IO; /* not in core dump */
                vma->vm_page_prot = drm_io_prot(map->type, vma);
  #if !defined(__arm__)
diff --combined drivers/gpu/drm/i915/i915_gem.c
index 29e97c075421f2776bfd2eb2bd06062d68080778,4cdf74264ee8062e5f436c21a6fd08eb51176c89..100a7537980e1c65eb4f4c2f4d0c89b3224d5451
@@@ -37,9 -37,7 +37,9 @@@
  #include <linux/intel-gtt.h>
  
  static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
 -static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 +
 +static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
 +                                                bool pipelined);
  static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
  static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
  static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@@ -48,8 -46,7 +48,8 @@@ static int i915_gem_object_set_cpu_read
                                                     uint64_t offset,
                                                     uint64_t size);
  static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
 -static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 +static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
 +                                        bool interruptible);
  static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                           unsigned alignment);
  static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
@@@ -58,111 -55,9 +58,111 @@@ static int i915_gem_phys_pwrite(struct 
                                struct drm_file *file_priv);
  static void i915_gem_free_object_tail(struct drm_gem_object *obj);
  
 +static int
 +i915_gem_object_get_pages(struct drm_gem_object *obj,
 +                        gfp_t gfpmask);
 +
 +static void
 +i915_gem_object_put_pages(struct drm_gem_object *obj);
 +
  static LIST_HEAD(shrink_list);
  static DEFINE_SPINLOCK(shrink_list_lock);
  
 +/* some bookkeeping */
 +static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
 +                                size_t size)
 +{
 +      dev_priv->mm.object_count++;
 +      dev_priv->mm.object_memory += size;
 +}
 +
 +static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
 +                                   size_t size)
 +{
 +      dev_priv->mm.object_count--;
 +      dev_priv->mm.object_memory -= size;
 +}
 +
 +static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
 +                                size_t size)
 +{
 +      dev_priv->mm.gtt_count++;
 +      dev_priv->mm.gtt_memory += size;
 +}
 +
 +static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
 +                                   size_t size)
 +{
 +      dev_priv->mm.gtt_count--;
 +      dev_priv->mm.gtt_memory -= size;
 +}
 +
 +static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
 +                                size_t size)
 +{
 +      dev_priv->mm.pin_count++;
 +      dev_priv->mm.pin_memory += size;
 +}
 +
 +static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
 +                                   size_t size)
 +{
 +      dev_priv->mm.pin_count--;
 +      dev_priv->mm.pin_memory -= size;
 +}
 +
 +int
 +i915_gem_check_is_wedged(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct completion *x = &dev_priv->error_completion;
 +      unsigned long flags;
 +      int ret;
 +
 +      if (!atomic_read(&dev_priv->mm.wedged))
 +              return 0;
 +
 +      ret = wait_for_completion_interruptible(x);
 +      if (ret)
 +              return ret;
 +
 +      /* Success, we reset the GPU! */
 +      if (!atomic_read(&dev_priv->mm.wedged))
 +              return 0;
 +
 +      /* GPU is hung, bump the completion count to account for
 +       * the token we just consumed so that we never hit zero and
 +       * end up waiting upon a subsequent completion event that
 +       * will never happen.
 +       */
 +      spin_lock_irqsave(&x->wait.lock, flags);
 +      x->done++;
 +      spin_unlock_irqrestore(&x->wait.lock, flags);
 +      return -EIO;
 +}
 +
 +static int i915_mutex_lock_interruptible(struct drm_device *dev)
 +{
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      int ret;
 +
 +      ret = i915_gem_check_is_wedged(dev);
 +      if (ret)
 +              return ret;
 +
 +      ret = mutex_lock_interruptible(&dev->struct_mutex);
 +      if (ret)
 +              return ret;
 +
 +      if (atomic_read(&dev_priv->mm.wedged)) {
 +              mutex_unlock(&dev->struct_mutex);
 +              return -EAGAIN;
 +      }
 +
 +      WARN_ON(i915_verify_lists(dev));
 +      return 0;
 +}
 +
  static inline bool
  i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
  {
                obj_priv->pin_count == 0;
  }
  
 -int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 +int i915_gem_do_init(struct drm_device *dev,
 +                   unsigned long start,
                     unsigned long end)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_mm_init(&dev_priv->mm.gtt_space, start,
                    end - start);
  
 -      dev->gtt_total = (uint32_t) (end - start);
 +      dev_priv->mm.gtt_total = end - start;
  
        return 0;
  }
@@@ -209,16 -103,14 +209,16 @@@ in
  i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
  {
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_get_aperture *args = data;
  
        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;
  
 -      args->aper_size = dev->gtt_total;
 -      args->aper_available_size = (args->aper_size -
 -                                   atomic_read(&dev->pin_memory));
 +      mutex_lock(&dev->struct_mutex);
 +      args->aper_size = dev_priv->mm.gtt_total;
 +      args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
 +      mutex_unlock(&dev->struct_mutex);
  
        return 0;
  }
@@@ -244,14 -136,12 +244,12 @@@ i915_gem_create_ioctl(struct drm_devic
                return -ENOMEM;
  
        ret = drm_gem_handle_create(file_priv, obj, &handle);
+       /* drop reference from allocate - handle holds it now */
+       drm_gem_object_unreference_unlocked(obj);
        if (ret) {
-               drm_gem_object_unreference_unlocked(obj);
                return ret;
        }
  
-       /* Sink the floating reference from kref_init(handlecount) */
-       drm_gem_object_handle_unreference_unlocked(obj);
        args->handle = handle;
        return 0;
  }
@@@ -373,9 -263,7 +371,9 @@@ i915_gem_shmem_pread_fast(struct drm_de
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
  
 -      mutex_lock(&dev->struct_mutex);
 +      ret = i915_mutex_lock_interruptible(dev);
 +      if (ret)
 +              return ret;
  
        ret = i915_gem_object_get_pages(obj, 0);
        if (ret != 0)
@@@ -494,9 -382,7 +492,9 @@@ i915_gem_shmem_pread_slow(struct drm_de
  
        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
  
 -      mutex_lock(&dev->struct_mutex);
 +      ret = i915_mutex_lock_interruptible(dev);
 +      if (ret)
 +              goto fail_put_user_pages;
  
        ret = i915_gem_object_get_pages_or_evict(obj);
        if (ret)
@@@ -576,27 -462,21 +574,27 @@@ i915_gem_pread_ioctl(struct drm_device 
        struct drm_i915_gem_pread *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
 -      int ret;
 +      int ret = 0;
  
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;
        obj_priv = to_intel_bo(obj);
  
 -      /* Bounds check source.
 -       *
 -       * XXX: This could use review for overflow issues...
 -       */
 -      if (args->offset > obj->size || args->size > obj->size ||
 -          args->offset + args->size > obj->size) {
 -              drm_gem_object_unreference_unlocked(obj);
 -              return -EINVAL;
 +      /* Bounds check source.  */
 +      if (args->offset > obj->size || args->size > obj->size - args->offset) {
 +              ret = -EINVAL;
 +              goto out;
 +      }
 +
 +      if (args->size == 0)
 +              goto out;
 +
 +      if (!access_ok(VERIFY_WRITE,
 +                     (char __user *)(uintptr_t)args->data_ptr,
 +                     args->size)) {
 +              ret = -EFAULT;
 +              goto out;
        }
  
        if (i915_gem_object_needs_bit17_swizzle(obj)) {
                                                        file_priv);
        }
  
 +out:
        drm_gem_object_unreference_unlocked(obj);
 -
        return ret;
  }
  
@@@ -698,11 -578,11 +696,11 @@@ i915_gem_gtt_pwrite_fast(struct drm_dev
  
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
 -      if (!access_ok(VERIFY_READ, user_data, remain))
 -              return -EFAULT;
  
 +      ret = i915_mutex_lock_interruptible(dev);
 +      if (ret)
 +              return ret;
  
 -      mutex_lock(&dev->struct_mutex);
        ret = i915_gem_object_pin(obj, 0);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
@@@ -797,10 -677,7 +795,10 @@@ i915_gem_gtt_pwrite_slow(struct drm_dev
                goto out_unpin_pages;
        }
  
 -      mutex_lock(&dev->struct_mutex);
 +      ret = i915_mutex_lock_interruptible(dev);
 +      if (ret)
 +              goto out_unpin_pages;
 +
        ret = i915_gem_object_pin(obj, 0);
        if (ret)
                goto out_unlock;
@@@ -874,9 -751,7 +872,9 @@@ i915_gem_shmem_pwrite_fast(struct drm_d
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
  
 -      mutex_lock(&dev->struct_mutex);
 +      ret = i915_mutex_lock_interruptible(dev);
 +      if (ret)
 +              return ret;
  
        ret = i915_gem_object_get_pages(obj, 0);
        if (ret != 0)
@@@ -972,9 -847,7 +970,9 @@@ i915_gem_shmem_pwrite_slow(struct drm_d
  
        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
  
 -      mutex_lock(&dev->struct_mutex);
 +      ret = i915_mutex_lock_interruptible(dev);
 +      if (ret)
 +              goto fail_put_user_pages;
  
        ret = i915_gem_object_get_pages_or_evict(obj);
        if (ret)
@@@ -1059,20 -932,14 +1057,20 @@@ i915_gem_pwrite_ioctl(struct drm_devic
                return -ENOENT;
        obj_priv = to_intel_bo(obj);
  
 -      /* Bounds check destination.
 -       *
 -       * XXX: This could use review for overflow issues...
 -       */
 -      if (args->offset > obj->size || args->size > obj->size ||
 -          args->offset + args->size > obj->size) {
 -              drm_gem_object_unreference_unlocked(obj);
 -              return -EINVAL;
 +      /* Bounds check destination. */
 +      if (args->offset > obj->size || args->size > obj->size - args->offset) {
 +              ret = -EINVAL;
 +              goto out;
 +      }
 +
 +      if (args->size == 0)
 +              goto out;
 +
 +      if (!access_ok(VERIFY_READ,
 +                     (char __user *)(uintptr_t)args->data_ptr,
 +                     args->size)) {
 +              ret = -EFAULT;
 +              goto out;
        }
  
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
        if (obj_priv->phys_obj)
                ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
        else if (obj_priv->tiling_mode == I915_TILING_NONE &&
 -               dev->gtt_total != 0 &&
 +               obj_priv->gtt_space &&
                 obj->write_domain != I915_GEM_DOMAIN_CPU) {
                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
                if (ret == -EFAULT) {
                DRM_INFO("pwrite failed %d\n", ret);
  #endif
  
 +out:
        drm_gem_object_unreference_unlocked(obj);
 -
        return ret;
  }
  
@@@ -1148,14 -1015,14 +1146,14 @@@ i915_gem_set_domain_ioctl(struct drm_de
                return -ENOENT;
        obj_priv = to_intel_bo(obj);
  
 -      mutex_lock(&dev->struct_mutex);
 +      ret = i915_mutex_lock_interruptible(dev);
 +      if (ret) {
 +              drm_gem_object_unreference_unlocked(obj);
 +              return ret;
 +      }
  
        intel_mark_busy(dev, obj);
  
 -#if WATCH_BUF
 -      DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
 -               obj, obj->size, read_domains, write_domain);
 -#endif
        if (read_domains & I915_GEM_DOMAIN_GTT) {
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
  
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
        }
  
 -      
        /* Maintain LRU order of "inactive" objects */
        if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
@@@ -1197,23 -1065,27 +1195,23 @@@ i915_gem_sw_finish_ioctl(struct drm_dev
  {
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_gem_object *obj;
 -      struct drm_i915_gem_object *obj_priv;
        int ret = 0;
  
        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;
  
 -      mutex_lock(&dev->struct_mutex);
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 -      if (obj == NULL) {
 -              mutex_unlock(&dev->struct_mutex);
 +      if (obj == NULL)
                return -ENOENT;
 -      }
  
 -#if WATCH_BUF
 -      DRM_INFO("%s: sw_finish %d (%p %zd)\n",
 -               __func__, args->handle, obj, obj->size);
 -#endif
 -      obj_priv = to_intel_bo(obj);
 +      ret = i915_mutex_lock_interruptible(dev);
 +      if (ret) {
 +              drm_gem_object_unreference_unlocked(obj);
 +              return ret;
 +      }
  
        /* Pinned buffers may be scanout, so flush the cache */
 -      if (obj_priv->pin_count)
 +      if (to_intel_bo(obj)->pin_count)
                i915_gem_object_flush_cpu_write_domain(obj);
  
        drm_gem_object_unreference(obj);
@@@ -1305,7 -1177,7 +1303,7 @@@ int i915_gem_fault(struct vm_area_struc
  
        /* Need a new fence register? */
        if (obj_priv->tiling_mode != I915_TILING_NONE) {
 -              ret = i915_gem_object_get_fence_reg(obj);
 +              ret = i915_gem_object_get_fence_reg(obj, true);
                if (ret)
                        goto unlock;
        }
@@@ -1370,7 -1242,7 +1368,7 @@@ i915_gem_create_mmap_offset(struct drm_
                                                    obj->size / PAGE_SIZE, 0, 0);
        if (!list->file_offset_node) {
                DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
 -              ret = -ENOMEM;
 +              ret = -ENOSPC;
                goto out_free_list;
        }
  
        }
  
        list->hash.key = list->file_offset_node->start;
 -      if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
 +      ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
 +      if (ret) {
                DRM_ERROR("failed to add to map hash\n");
 -              ret = -ENOMEM;
                goto out_free_mm;
        }
  
@@@ -1469,14 -1341,14 +1467,14 @@@ i915_gem_get_gtt_alignment(struct drm_g
         * Minimum alignment is 4k (GTT page size), but might be greater
         * if a fence register is needed for the object.
         */
 -      if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
 +      if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
                return 4096;
  
        /*
         * Previous chips need to be aligned to the size of the smallest
         * fence register that can contain the object.
         */
 -      if (IS_I9XX(dev))
 +      if (INTEL_INFO(dev)->gen == 3)
                start = 1024*1024;
        else
                start = 512*1024;
@@@ -1518,11 -1390,7 +1516,11 @@@ i915_gem_mmap_gtt_ioctl(struct drm_devi
        if (obj == NULL)
                return -ENOENT;
  
 -      mutex_lock(&dev->struct_mutex);
 +      ret = i915_mutex_lock_interruptible(dev);
 +      if (ret) {
 +              drm_gem_object_unreference_unlocked(obj);
 +              return ret;
 +      }
  
        obj_priv = to_intel_bo(obj);
  
        return 0;
  }
  
 -void
 +static void
  i915_gem_object_put_pages(struct drm_gem_object *obj)
  {
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        obj_priv->pages = NULL;
  }
  
 +static uint32_t
 +i915_gem_next_request_seqno(struct drm_device *dev,
 +                          struct intel_ring_buffer *ring)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +
 +      ring->outstanding_lazy_request = true;
 +      return dev_priv->next_seqno;
 +}
 +
  static void
 -i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
 +i915_gem_object_move_to_active(struct drm_gem_object *obj,
                               struct intel_ring_buffer *ring)
  {
        struct drm_device *dev = obj->dev;
 -      drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 +      uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
 +
        BUG_ON(ring == NULL);
        obj_priv->ring = ring;
  
                drm_gem_object_reference(obj);
                obj_priv->active = 1;
        }
 +
        /* Move from whatever list we were on to the tail of execution. */
 -      spin_lock(&dev_priv->mm.active_list_lock);
        list_move_tail(&obj_priv->list, &ring->active_list);
 -      spin_unlock(&dev_priv->mm.active_list_lock);
        obj_priv->last_rendering_seqno = seqno;
  }
  
@@@ -1676,8 -1534,9 +1674,8 @@@ i915_gem_object_move_to_inactive(struc
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  
 -      i915_verify_inactive(dev, __FILE__, __LINE__);
        if (obj_priv->pin_count != 0)
 -              list_del_init(&obj_priv->list);
 +              list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list);
        else
                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
  
                obj_priv->active = 0;
                drm_gem_object_unreference(obj);
        }
 -      i915_verify_inactive(dev, __FILE__, __LINE__);
 +      WARN_ON(i915_verify_lists(dev));
  }
  
  static void
  i915_gem_process_flushing_list(struct drm_device *dev,
 -                             uint32_t flush_domains, uint32_t seqno,
 +                             uint32_t flush_domains,
                               struct intel_ring_buffer *ring)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
                                 gpu_write_list) {
                struct drm_gem_object *obj = &obj_priv->base;
  
 -              if ((obj->write_domain & flush_domains) ==
 -                  obj->write_domain &&
 -                  obj_priv->ring->ring_flag == ring->ring_flag) {
 +              if (obj->write_domain & flush_domains &&
 +                  obj_priv->ring == ring) {
                        uint32_t old_write_domain = obj->write_domain;
  
                        obj->write_domain = 0;
                        list_del_init(&obj_priv->gpu_write_list);
 -                      i915_gem_object_move_to_active(obj, seqno, ring);
 +                      i915_gem_object_move_to_active(obj, ring);
  
                        /* update the fence lru list */
                        if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
  }
  
  uint32_t
 -i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 -               uint32_t flush_domains, struct intel_ring_buffer *ring)
 +i915_add_request(struct drm_device *dev,
 +               struct drm_file *file,
 +               struct drm_i915_gem_request *request,
 +               struct intel_ring_buffer *ring)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
 -      struct drm_i915_file_private *i915_file_priv = NULL;
 -      struct drm_i915_gem_request *request;
 +      struct drm_i915_file_private *file_priv = NULL;
        uint32_t seqno;
        int was_empty;
  
 -      if (file_priv != NULL)
 -              i915_file_priv = file_priv->driver_priv;
 +      if (file != NULL)
 +              file_priv = file->driver_priv;
  
 -      request = kzalloc(sizeof(*request), GFP_KERNEL);
 -      if (request == NULL)
 -              return 0;
 +      if (request == NULL) {
 +              request = kzalloc(sizeof(*request), GFP_KERNEL);
 +              if (request == NULL)
 +                      return 0;
 +      }
  
 -      seqno = ring->add_request(dev, ring, file_priv, flush_domains);
 +      seqno = ring->add_request(dev, ring, 0);
 +      ring->outstanding_lazy_request = false;
  
        request->seqno = seqno;
        request->ring = ring;
        was_empty = list_empty(&ring->request_list);
        list_add_tail(&request->list, &ring->request_list);
  
 -      if (i915_file_priv) {
 +      if (file_priv) {
 +              spin_lock(&file_priv->mm.lock);
 +              request->file_priv = file_priv;
                list_add_tail(&request->client_list,
 -                            &i915_file_priv->mm.request_list);
 -      } else {
 -              INIT_LIST_HEAD(&request->client_list);
 +                            &file_priv->mm.request_list);
 +              spin_unlock(&file_priv->mm.lock);
        }
  
 -      /* Associate any objects on the flushing list matching the write
 -       * domain we're flushing with our flush.
 -       */
 -      if (flush_domains != 0) 
 -              i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
 -
        if (!dev_priv->mm.suspended) {
 -              mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
 +              mod_timer(&dev_priv->hangcheck_timer,
 +                        jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
                if (was_empty)
 -                      queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 +                      queue_delayed_work(dev_priv->wq,
 +                                         &dev_priv->mm.retire_work, HZ);
        }
        return seqno;
  }
   * Ensures that all commands in the ring are finished
   * before signalling the CPU
   */
 -static uint32_t
 +static void
  i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
  {
        uint32_t flush_domains = 0;
  
        /* The sampler always gets flushed on i965 (sigh) */
 -      if (IS_I965G(dev))
 +      if (INTEL_INFO(dev)->gen >= 4)
                flush_domains |= I915_GEM_DOMAIN_SAMPLER;
  
        ring->flush(dev, ring,
                        I915_GEM_DOMAIN_COMMAND, flush_domains);
 -      return flush_domains;
  }
  
 -/**
 - * Moves buffers associated only with the given active seqno from the active
 - * to inactive list, potentially freeing them.
 - */
 -static void
 -i915_gem_retire_request(struct drm_device *dev,
 -                      struct drm_i915_gem_request *request)
 +static inline void
 +i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_file_private *file_priv = request->file_priv;
  
 -      trace_i915_gem_request_retire(dev, request->seqno);
 +      if (!file_priv)
 +              return;
  
 -      /* Move any buffers on the active list that are no longer referenced
 -       * by the ringbuffer to the flushing/inactive lists as appropriate.
 -       */
 -      spin_lock(&dev_priv->mm.active_list_lock);
 -      while (!list_empty(&request->ring->active_list)) {
 -              struct drm_gem_object *obj;
 +      spin_lock(&file_priv->mm.lock);
 +      list_del(&request->client_list);
 +      request->file_priv = NULL;
 +      spin_unlock(&file_priv->mm.lock);
 +}
 +
 +static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 +                                    struct intel_ring_buffer *ring)
 +{
 +      while (!list_empty(&ring->request_list)) {
 +              struct drm_i915_gem_request *request;
 +
 +              request = list_first_entry(&ring->request_list,
 +                                         struct drm_i915_gem_request,
 +                                         list);
 +
 +              list_del(&request->list);
 +              i915_gem_request_remove_from_client(request);
 +              kfree(request);
 +      }
 +
 +      while (!list_empty(&ring->active_list)) {
                struct drm_i915_gem_object *obj_priv;
  
 -              obj_priv = list_first_entry(&request->ring->active_list,
 +              obj_priv = list_first_entry(&ring->active_list,
                                            struct drm_i915_gem_object,
                                            list);
 -              obj = &obj_priv->base;
 -
 -              /* If the seqno being retired doesn't match the oldest in the
 -               * list, then the oldest in the list must still be newer than
 -               * this seqno.
 -               */
 -              if (obj_priv->last_rendering_seqno != request->seqno)
 -                      goto out;
  
 -#if WATCH_LRU
 -              DRM_INFO("%s: retire %d moves to inactive list %p\n",
 -                       __func__, request->seqno, obj);
 -#endif
 -
 -              if (obj->write_domain != 0)
 -                      i915_gem_object_move_to_flushing(obj);
 -              else {
 -                      /* Take a reference on the object so it won't be
 -                       * freed while the spinlock is held.  The list
 -                       * protection for this spinlock is safe when breaking
 -                       * the lock like this since the next thing we do
 -                       * is just get the head of the list again.
 -                       */
 -                      drm_gem_object_reference(obj);
 -                      i915_gem_object_move_to_inactive(obj);
 -                      spin_unlock(&dev_priv->mm.active_list_lock);
 -                      drm_gem_object_unreference(obj);
 -                      spin_lock(&dev_priv->mm.active_list_lock);
 -              }
 +              obj_priv->base.write_domain = 0;
 +              list_del_init(&obj_priv->gpu_write_list);
 +              i915_gem_object_move_to_inactive(&obj_priv->base);
        }
 -out:
 -      spin_unlock(&dev_priv->mm.active_list_lock);
  }
  
 -/**
 - * Returns true if seq1 is later than seq2.
 - */
 -bool
 -i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 +void i915_gem_reset(struct drm_device *dev)
  {
 -      return (int32_t)(seq1 - seq2) >= 0;
 -}
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_gem_object *obj_priv;
 +      int i;
  
 -uint32_t
 -i915_get_gem_seqno(struct drm_device *dev,
 -                 struct intel_ring_buffer *ring)
 -{
 -      return ring->get_gem_seqno(dev, ring);
 +      i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
 +      if (HAS_BSD(dev))
 +              i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
 +
 +      /* Remove anything from the flushing lists. The GPU cache is likely
 +       * to be lost on reset along with the data, so simply move the
 +       * lost bo to the inactive list.
 +       */
 +      while (!list_empty(&dev_priv->mm.flushing_list)) {
 +              obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
 +                                          struct drm_i915_gem_object,
 +                                          list);
 +
 +              obj_priv->base.write_domain = 0;
 +              list_del_init(&obj_priv->gpu_write_list);
 +              i915_gem_object_move_to_inactive(&obj_priv->base);
 +      }
 +
 +      /* Move everything out of the GPU domains to ensure we do any
 +       * necessary invalidation upon reuse.
 +       */
 +      list_for_each_entry(obj_priv,
 +                          &dev_priv->mm.inactive_list,
 +                          list)
 +      {
 +              obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
 +      }
 +
 +      /* The fence registers are invalidated so clear them out */
 +      for (i = 0; i < 16; i++) {
 +              struct drm_i915_fence_reg *reg;
 +
 +              reg = &dev_priv->fence_regs[i];
 +              if (!reg->obj)
 +                      continue;
 +
 +              i915_gem_clear_fence_reg(reg->obj);
 +      }
  }
  
  /**
@@@ -1892,58 -1737,38 +1890,58 @@@ i915_gem_retire_requests_ring(struct dr
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;
  
 -      if (!ring->status_page.page_addr
 -                      || list_empty(&ring->request_list))
 +      if (!ring->status_page.page_addr ||
 +          list_empty(&ring->request_list))
                return;
  
 -      seqno = i915_get_gem_seqno(dev, ring);
 +      WARN_ON(i915_verify_lists(dev));
  
 +      seqno = ring->get_seqno(dev, ring);
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
 -              uint32_t retiring_seqno;
  
                request = list_first_entry(&ring->request_list,
                                           struct drm_i915_gem_request,
                                           list);
 -              retiring_seqno = request->seqno;
  
 -              if (i915_seqno_passed(seqno, retiring_seqno) ||
 -                  atomic_read(&dev_priv->mm.wedged)) {
 -                      i915_gem_retire_request(dev, request);
 +              if (!i915_seqno_passed(seqno, request->seqno))
 +                      break;
 +
 +              trace_i915_gem_request_retire(dev, request->seqno);
 +
 +              list_del(&request->list);
 +              i915_gem_request_remove_from_client(request);
 +              kfree(request);
 +      }
 +
 +      /* Move any buffers on the active list that are no longer referenced
 +       * by the ringbuffer to the flushing/inactive lists as appropriate.
 +       */
 +      while (!list_empty(&ring->active_list)) {
 +              struct drm_gem_object *obj;
 +              struct drm_i915_gem_object *obj_priv;
 +
 +              obj_priv = list_first_entry(&ring->active_list,
 +                                          struct drm_i915_gem_object,
 +                                          list);
  
 -                      list_del(&request->list);
 -                      list_del(&request->client_list);
 -                      kfree(request);
 -              } else
 +              if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
                        break;
 +
 +              obj = &obj_priv->base;
 +              if (obj->write_domain != 0)
 +                      i915_gem_object_move_to_flushing(obj);
 +              else
 +                      i915_gem_object_move_to_inactive(obj);
        }
  
        if (unlikely (dev_priv->trace_irq_seqno &&
                      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
 -
                ring->user_irq_put(dev, ring);
                dev_priv->trace_irq_seqno = 0;
        }
 +
 +      WARN_ON(i915_verify_lists(dev));
  }
  
  void
@@@ -1970,7 -1795,7 +1968,7 @@@ i915_gem_retire_requests(struct drm_dev
                i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
  }
  
 -void
 +static void
  i915_gem_retire_work_handler(struct work_struct *work)
  {
        drm_i915_private_t *dev_priv;
                                mm.retire_work.work);
        dev = dev_priv->dev;
  
 -      mutex_lock(&dev->struct_mutex);
 +      /* Come back later if the device is busy... */
 +      if (!mutex_trylock(&dev->struct_mutex)) {
 +              queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 +              return;
 +      }
 +
        i915_gem_retire_requests(dev);
  
        if (!dev_priv->mm.suspended &&
  
  int
  i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 -              int interruptible, struct intel_ring_buffer *ring)
 +                   bool interruptible, struct intel_ring_buffer *ring)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 ier;
        BUG_ON(seqno == 0);
  
        if (atomic_read(&dev_priv->mm.wedged))
 -              return -EIO;
 +              return -EAGAIN;
 +
 +      if (ring->outstanding_lazy_request) {
 +              seqno = i915_add_request(dev, NULL, NULL, ring);
 +              if (seqno == 0)
 +                      return -ENOMEM;
 +      }
 +      BUG_ON(seqno == dev_priv->next_seqno);
  
 -      if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
 +      if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
                if (HAS_PCH_SPLIT(dev))
                        ier = I915_READ(DEIER) | I915_READ(GTIER);
                else
                if (interruptible)
                        ret = wait_event_interruptible(ring->irq_queue,
                                i915_seqno_passed(
 -                                      ring->get_gem_seqno(dev, ring), seqno)
 +                                      ring->get_seqno(dev, ring), seqno)
                                || atomic_read(&dev_priv->mm.wedged));
                else
                        wait_event(ring->irq_queue,
                                i915_seqno_passed(
 -                                      ring->get_gem_seqno(dev, ring), seqno)
 +                                      ring->get_seqno(dev, ring), seqno)
                                || atomic_read(&dev_priv->mm.wedged));
  
                ring->user_irq_put(dev, ring);
                trace_i915_gem_request_wait_end(dev, seqno);
        }
        if (atomic_read(&dev_priv->mm.wedged))
 -              ret = -EIO;
 +              ret = -EAGAIN;
  
        if (ret && ret != -ERESTARTSYS)
 -              DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
 -                        __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
 +              DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
 +                        __func__, ret, seqno, ring->get_seqno(dev, ring),
 +                        dev_priv->next_seqno);
  
        /* Directly dispatch request retiring.  While we have the work queue
         * to handle this, the waiter on a request often wants an associated
   */
  static int
  i915_wait_request(struct drm_device *dev, uint32_t seqno,
 -              struct intel_ring_buffer *ring)
 +                struct intel_ring_buffer *ring)
  {
        return i915_do_wait_request(dev, seqno, 1, ring);
  }
  
 +static void
 +i915_gem_flush_ring(struct drm_device *dev,
 +                  struct drm_file *file_priv,
 +                  struct intel_ring_buffer *ring,
 +                  uint32_t invalidate_domains,
 +                  uint32_t flush_domains)
 +{
 +      ring->flush(dev, ring, invalidate_domains, flush_domains);
 +      i915_gem_process_flushing_list(dev, flush_domains, ring);
 +}
 +
  static void
  i915_gem_flush(struct drm_device *dev,
 +             struct drm_file *file_priv,
               uint32_t invalidate_domains,
 -             uint32_t flush_domains)
 +             uint32_t flush_domains,
 +             uint32_t flush_rings)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
 +
        if (flush_domains & I915_GEM_DOMAIN_CPU)
                drm_agp_chipset_flush(dev);
 -      dev_priv->render_ring.flush(dev, &dev_priv->render_ring,
 -                      invalidate_domains,
 -                      flush_domains);
  
 -      if (HAS_BSD(dev))
 -              dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
 -                              invalidate_domains,
 -                              flush_domains);
 +      if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
 +              if (flush_rings & RING_RENDER)
 +                      i915_gem_flush_ring(dev, file_priv,
 +                                          &dev_priv->render_ring,
 +                                          invalidate_domains, flush_domains);
 +              if (flush_rings & RING_BSD)
 +                      i915_gem_flush_ring(dev, file_priv,
 +                                          &dev_priv->bsd_ring,
 +                                          invalidate_domains, flush_domains);
 +      }
  }
  
  /**
   * safe to unbind from the GTT or access from the CPU.
   */
  static int
 -i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 +i915_gem_object_wait_rendering(struct drm_gem_object *obj,
 +                             bool interruptible)
  {
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
         * it.
         */
        if (obj_priv->active) {
 -#if WATCH_BUF
 -              DRM_INFO("%s: object %p wait for seqno %08x\n",
 -                        __func__, obj, obj_priv->last_rendering_seqno);
 -#endif
 -              ret = i915_wait_request(dev,
 -                              obj_priv->last_rendering_seqno, obj_priv->ring);
 -              if (ret != 0)
 +              ret = i915_do_wait_request(dev,
 +                                         obj_priv->last_rendering_seqno,
 +                                         interruptible,
 +                                         obj_priv->ring);
 +              if (ret)
                        return ret;
        }
  
  i915_gem_object_unbind(struct drm_gem_object *obj)
  {
        struct drm_device *dev = obj->dev;
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret = 0;
  
 -#if WATCH_BUF
 -      DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
 -      DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
 -#endif
        if (obj_priv->gtt_space == NULL)
                return 0;
  
         * should be safe and we need to cleanup or else we might
         * cause memory corruption through use-after-free.
         */
 +      if (ret) {
 +              i915_gem_clflush_object(obj);
 +              obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
 +      }
  
        /* release the fence reg _after_ flushing */
        if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
                i915_gem_clear_fence_reg(obj);
  
 -      if (obj_priv->agp_mem != NULL) {
 -              drm_unbind_agp(obj_priv->agp_mem);
 -              drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
 -              obj_priv->agp_mem = NULL;
 -      }
 +      drm_unbind_agp(obj_priv->agp_mem);
 +      drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
  
        i915_gem_object_put_pages(obj);
        BUG_ON(obj_priv->pages_refcount);
  
 -      if (obj_priv->gtt_space) {
 -              atomic_dec(&dev->gtt_count);
 -              atomic_sub(obj->size, &dev->gtt_memory);
 +      i915_gem_info_remove_gtt(dev_priv, obj->size);
 +      list_del_init(&obj_priv->list);
  
 -              drm_mm_put_block(obj_priv->gtt_space);
 -              obj_priv->gtt_space = NULL;
 -      }
 -
 -      /* Remove ourselves from the LRU list if present. */
 -      spin_lock(&dev_priv->mm.active_list_lock);
 -      if (!list_empty(&obj_priv->list))
 -              list_del_init(&obj_priv->list);
 -      spin_unlock(&dev_priv->mm.active_list_lock);
 +      drm_mm_put_block(obj_priv->gtt_space);
 +      obj_priv->gtt_space = NULL;
  
        if (i915_gem_object_is_purgeable(obj_priv))
                i915_gem_object_truncate(obj);
        return ret;
  }
  
 +static int i915_ring_idle(struct drm_device *dev,
 +                        struct intel_ring_buffer *ring)
 +{
 +      i915_gem_flush_ring(dev, NULL, ring,
 +                          I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 +      return i915_wait_request(dev,
 +                               i915_gem_next_request_seqno(dev, ring),
 +                               ring);
 +}
 +
  int
  i915_gpu_idle(struct drm_device *dev)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        bool lists_empty;
 -      uint32_t seqno1, seqno2;
        int ret;
  
 -      spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
                       list_empty(&dev_priv->render_ring.active_list) &&
                       (!HAS_BSD(dev) ||
                        list_empty(&dev_priv->bsd_ring.active_list)));
 -      spin_unlock(&dev_priv->mm.active_list_lock);
 -
        if (lists_empty)
                return 0;
  
        /* Flush everything onto the inactive list. */
 -      i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 -      seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
 -                      &dev_priv->render_ring);
 -      if (seqno1 == 0)
 -              return -ENOMEM;
 -      ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
 +      ret = i915_ring_idle(dev, &dev_priv->render_ring);
 +      if (ret)
 +              return ret;
  
        if (HAS_BSD(dev)) {
 -              seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
 -                              &dev_priv->bsd_ring);
 -              if (seqno2 == 0)
 -                      return -ENOMEM;
 -
 -              ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
 +              ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
                if (ret)
                        return ret;
        }
  
 -
 -      return ret;
 +      return 0;
  }
  
 -int
 +static int
  i915_gem_object_get_pages(struct drm_gem_object *obj,
                          gfp_t gfpmask)
  {
@@@ -2427,8 -2237,7 +2425,8 @@@ static void i830_write_fence_reg(struc
        I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
  }
  
 -static int i915_find_fence_reg(struct drm_device *dev)
 +static int i915_find_fence_reg(struct drm_device *dev,
 +                             bool interruptible)
  {
        struct drm_i915_fence_reg *reg = NULL;
        struct drm_i915_gem_object *obj_priv = NULL;
         * private reference to obj like the other callers of put_fence_reg
         * (set_tiling ioctl) do. */
        drm_gem_object_reference(obj);
 -      ret = i915_gem_object_put_fence_reg(obj);
 +      ret = i915_gem_object_put_fence_reg(obj, interruptible);
        drm_gem_object_unreference(obj);
        if (ret != 0)
                return ret;
   * and tiling format.
   */
  int
 -i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 +i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
 +                            bool interruptible)
  {
        struct drm_device *dev = obj->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
                break;
        }
  
 -      ret = i915_find_fence_reg(dev);
 +      ret = i915_find_fence_reg(dev, interruptible);
        if (ret < 0)
                return ret;
  
@@@ -2590,7 -2398,7 +2588,7 @@@ i915_gem_clear_fence_reg(struct drm_gem
                I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
                break;
        case 3:
 -              if (obj_priv->fence_reg > 8)
 +              if (obj_priv->fence_reg >= 8)
                        fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
                else
        case 2:
   * i915_gem_object_put_fence_reg - waits on outstanding fenced access
   * to the buffer to finish, and then resets the fence register.
   * @obj: tiled object holding a fence register.
 + * @bool: whether the wait upon the fence is interruptible
   *
   * Zeroes out the fence register itself and clears out the associated
   * data structures in dev_priv and obj_priv.
   */
  int
 -i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
 +i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
 +                            bool interruptible)
  {
        struct drm_device *dev = obj->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 +      struct drm_i915_fence_reg *reg;
  
        if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
                return 0;
         * therefore we must wait for any outstanding access to complete
         * before clearing the fence.
         */
 -      if (!IS_I965G(dev)) {
 +      reg = &dev_priv->fence_regs[obj_priv->fence_reg];
 +      if (reg->gpu) {
                int ret;
  
 -              ret = i915_gem_object_flush_gpu_write_domain(obj);
 -              if (ret != 0)
 +              ret = i915_gem_object_flush_gpu_write_domain(obj, true);
 +              if (ret)
                        return ret;
  
 -              ret = i915_gem_object_wait_rendering(obj);
 -              if (ret != 0)
 +              ret = i915_gem_object_wait_rendering(obj, interruptible);
 +              if (ret)
                        return ret;
 +
 +              reg->gpu = false;
        }
  
        i915_gem_object_flush_gtt_write_domain(obj);
 -      i915_gem_clear_fence_reg (obj);
 +      i915_gem_clear_fence_reg(obj);
  
        return 0;
  }
@@@ -2685,7 -2486,7 +2683,7 @@@ i915_gem_object_bind_to_gtt(struct drm_
        /* If the object is bigger than the entire aperture, reject it early
         * before evicting everything in a vain attempt to find space.
         */
 -      if (obj->size > dev->gtt_total) {
 +      if (obj->size > dev_priv->mm.gtt_total) {
                DRM_ERROR("Attempting to bind an object larger than the aperture\n");
                return -E2BIG;
        }
                /* If the gtt is empty and we're still having trouble
                 * fitting our object in, we're out of memory.
                 */
 -#if WATCH_LRU
 -              DRM_INFO("%s: GTT full, evicting something\n", __func__);
 -#endif
                ret = i915_gem_evict_something(dev, obj->size, alignment);
                if (ret)
                        return ret;
                goto search_free;
        }
  
 -#if WATCH_BUF
 -      DRM_INFO("Binding object of size %zd at 0x%08x\n",
 -               obj->size, obj_priv->gtt_offset);
 -#endif
        ret = i915_gem_object_get_pages(obj, gfpmask);
        if (ret) {
                drm_mm_put_block(obj_priv->gtt_space);
  
                goto search_free;
        }
 -      atomic_inc(&dev->gtt_count);
 -      atomic_add(obj->size, &dev->gtt_memory);
  
        /* keep track of bounds object by adding it to the inactive list */
        list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 +      i915_gem_info_add_gtt(dev_priv, obj->size);
  
        /* Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
@@@ -2790,30 -2599,25 +2788,30 @@@ i915_gem_clflush_object(struct drm_gem_
  
  /** Flushes any GPU write domain for the object if it's dirty. */
  static int
 -i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 +i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
 +                                     bool pipelined)
  {
        struct drm_device *dev = obj->dev;
        uint32_t old_write_domain;
 -      struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  
        if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
                return 0;
  
        /* Queue the GPU write cache flushing we need. */
        old_write_domain = obj->write_domain;
 -      i915_gem_flush(dev, 0, obj->write_domain);
 -      if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
 -              return -ENOMEM;
 +      i915_gem_flush_ring(dev, NULL,
 +                          to_intel_bo(obj)->ring,
 +                          0, obj->write_domain);
 +      BUG_ON(obj->write_domain);
  
        trace_i915_gem_object_change_domain(obj,
                                            obj->read_domains,
                                            old_write_domain);
 -      return 0;
 +
 +      if (pipelined)
 +              return 0;
 +
 +      return i915_gem_object_wait_rendering(obj, true);
  }
  
  /** Flushes the GTT write domain for the object if it's dirty. */
@@@ -2857,6 -2661,26 +2855,6 @@@ i915_gem_object_flush_cpu_write_domain(
                                            old_write_domain);
  }
  
 -int
 -i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
 -{
 -      int ret = 0;
 -
 -      switch (obj->write_domain) {
 -      case I915_GEM_DOMAIN_GTT:
 -              i915_gem_object_flush_gtt_write_domain(obj);
 -              break;
 -      case I915_GEM_DOMAIN_CPU:
 -              i915_gem_object_flush_cpu_write_domain(obj);
 -              break;
 -      default:
 -              ret = i915_gem_object_flush_gpu_write_domain(obj);
 -              break;
 -      }
 -
 -      return ret;
 -}
 -
  /**
   * Moves a single object to the GTT read, and possibly write domain.
   *
@@@ -2874,28 -2698,32 +2872,28 @@@ i915_gem_object_set_to_gtt_domain(struc
        if (obj_priv->gtt_space == NULL)
                return -EINVAL;
  
 -      ret = i915_gem_object_flush_gpu_write_domain(obj);
 +      ret = i915_gem_object_flush_gpu_write_domain(obj, false);
        if (ret != 0)
                return ret;
  
 -      /* Wait on any GPU rendering and flushing to occur. */
 -      ret = i915_gem_object_wait_rendering(obj);
 -      if (ret != 0)
 -              return ret;
 +      i915_gem_object_flush_cpu_write_domain(obj);
 +
 +      if (write) {
 +              ret = i915_gem_object_wait_rendering(obj, true);
 +              if (ret)
 +                      return ret;
 +      }
  
        old_write_domain = obj->write_domain;
        old_read_domains = obj->read_domains;
  
 -      /* If we're writing through the GTT domain, then CPU and GPU caches
 -       * will need to be invalidated at next use.
 -       */
 -      if (write)
 -              obj->read_domains &= I915_GEM_DOMAIN_GTT;
 -
 -      i915_gem_object_flush_cpu_write_domain(obj);
 -
        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
        BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
        obj->read_domains |= I915_GEM_DOMAIN_GTT;
        if (write) {
 +              obj->read_domains = I915_GEM_DOMAIN_GTT;
                obj->write_domain = I915_GEM_DOMAIN_GTT;
                obj_priv->dirty = 1;
        }
   * wait, as in modesetting process we're not supposed to be interrupted.
   */
  int
 -i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
 +i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
 +                                   bool pipelined)
  {
 -      struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 -      uint32_t old_write_domain, old_read_domains;
 +      uint32_t old_read_domains;
        int ret;
  
        /* Not valid to be called on unbound objects. */
        if (obj_priv->gtt_space == NULL)
                return -EINVAL;
  
 -      ret = i915_gem_object_flush_gpu_write_domain(obj);
 +      ret = i915_gem_object_flush_gpu_write_domain(obj, true);
        if (ret)
                return ret;
  
 -      /* Wait on any GPU rendering and flushing to occur. */
 -      if (obj_priv->active) {
 -#if WATCH_BUF
 -              DRM_INFO("%s: object %p wait for seqno %08x\n",
 -                        __func__, obj, obj_priv->last_rendering_seqno);
 -#endif
 -              ret = i915_do_wait_request(dev,
 -                              obj_priv->last_rendering_seqno,
 -                              0,
 -                              obj_priv->ring);
 -              if (ret != 0)
 +      /* Currently, we are always called from an non-interruptible context. */
 +      if (!pipelined) {
 +              ret = i915_gem_object_wait_rendering(obj, false);
 +              if (ret)
                        return ret;
        }
  
        i915_gem_object_flush_cpu_write_domain(obj);
  
 -      old_write_domain = obj->write_domain;
        old_read_domains = obj->read_domains;
 -
 -      /* It should now be out of any other write domains, and we can update
 -       * the domain values for our changes.
 -       */
 -      BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
 -      obj->read_domains = I915_GEM_DOMAIN_GTT;
 -      obj->write_domain = I915_GEM_DOMAIN_GTT;
 -      obj_priv->dirty = 1;
 +      obj->read_domains |= I915_GEM_DOMAIN_GTT;
  
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
 -                                          old_write_domain);
 +                                          obj->write_domain);
  
        return 0;
  }
@@@ -2958,7 -2801,12 +2956,7 @@@ i915_gem_object_set_to_cpu_domain(struc
        uint32_t old_write_domain, old_read_domains;
        int ret;
  
 -      ret = i915_gem_object_flush_gpu_write_domain(obj);
 -      if (ret)
 -              return ret;
 -
 -      /* Wait on any GPU rendering and flushing to occur. */
 -      ret = i915_gem_object_wait_rendering(obj);
 +      ret = i915_gem_object_flush_gpu_write_domain(obj, false);
        if (ret != 0)
                return ret;
  
         */
        i915_gem_object_set_to_full_cpu_read_domain(obj);
  
 +      if (write) {
 +              ret = i915_gem_object_wait_rendering(obj, true);
 +              if (ret)
 +                      return ret;
 +      }
 +
        old_write_domain = obj->write_domain;
        old_read_domains = obj->read_domains;
  
         * need to be invalidated at next use.
         */
        if (write) {
 -              obj->read_domains &= I915_GEM_DOMAIN_CPU;
 +              obj->read_domains = I915_GEM_DOMAIN_CPU;
                obj->write_domain = I915_GEM_DOMAIN_CPU;
        }
  
@@@ -3120,7 -2962,7 +3118,7 @@@ static voi
  i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
  {
        struct drm_device               *dev = obj->dev;
 -      drm_i915_private_t              *dev_priv = dev->dev_private;
 +      struct drm_i915_private         *dev_priv = dev->dev_private;
        struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
        uint32_t                        invalidate_domains = 0;
        uint32_t                        flush_domains = 0;
  
        intel_mark_busy(dev, obj);
  
 -#if WATCH_BUF
 -      DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
 -               __func__, obj,
 -               obj->read_domains, obj->pending_read_domains,
 -               obj->write_domain, obj->pending_write_domain);
 -#endif
        /*
         * If the object isn't moving to a new write domain,
         * let the object stay in multiple read domains
         * stale data. That is, any new read domains.
         */
        invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
 -      if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
 -#if WATCH_BUF
 -              DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
 -                       __func__, flush_domains, invalidate_domains);
 -#endif
 +      if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
                i915_gem_clflush_object(obj);
 -      }
  
        old_read_domains = obj->read_domains;
  
                obj->pending_write_domain = obj->write_domain;
        obj->read_domains = obj->pending_read_domains;
  
 -      if (flush_domains & I915_GEM_GPU_DOMAINS) {
 -              if (obj_priv->ring == &dev_priv->render_ring)
 -                      dev_priv->flush_rings |= FLUSH_RENDER_RING;
 -              else if (obj_priv->ring == &dev_priv->bsd_ring)
 -                      dev_priv->flush_rings |= FLUSH_BSD_RING;
 -      }
 -
        dev->invalidate_domains |= invalidate_domains;
        dev->flush_domains |= flush_domains;
 -#if WATCH_BUF
 -      DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
 -               __func__,
 -               obj->read_domains, obj->write_domain,
 -               dev->invalidate_domains, dev->flush_domains);
 -#endif
 +      if (obj_priv->ring)
 +              dev_priv->mm.flush_rings |= obj_priv->ring->id;
  
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
@@@ -3238,7 -3102,12 +3236,7 @@@ i915_gem_object_set_cpu_read_domain_ran
        if (offset == 0 && size == obj->size)
                return i915_gem_object_set_to_cpu_domain(obj, 0);
  
 -      ret = i915_gem_object_flush_gpu_write_domain(obj);
 -      if (ret)
 -              return ret;
 -
 -      /* Wait on any GPU rendering and flushing to occur. */
 -      ret = i915_gem_object_wait_rendering(obj);
 +      ret = i915_gem_object_flush_gpu_write_domain(obj, false);
        if (ret != 0)
                return ret;
        i915_gem_object_flush_gtt_write_domain(obj);
@@@ -3325,13 -3194,11 +3323,13 @@@ i915_gem_object_pin_and_relocate(struc
         * properly handle blits to/from tiled surfaces.
         */
        if (need_fence) {
 -              ret = i915_gem_object_get_fence_reg(obj);
 +              ret = i915_gem_object_get_fence_reg(obj, true);
                if (ret != 0) {
                        i915_gem_object_unpin(obj);
                        return ret;
                }
 +
 +              dev_priv->fence_regs[obj_priv->fence_reg].gpu = true;
        }
  
        entry->offset = obj_priv->gtt_offset;
                                  (int) reloc->offset,
                                  reloc->read_domains,
                                  reloc->write_domain);
 +                      drm_gem_object_unreference(target_obj);
 +                      i915_gem_object_unpin(obj);
                        return -EINVAL;
                }
                if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
                if (ret != 0) {
                        drm_gem_object_unreference(target_obj);
                        i915_gem_object_unpin(obj);
 -                      return -EINVAL;
 +                      return ret;
                }
  
                /* Map the page containing the relocation we're going to
                                                   (reloc_offset & (PAGE_SIZE - 1)));
                reloc_val = target_obj_priv->gtt_offset + reloc->delta;
  
 -#if WATCH_BUF
 -              DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
 -                        obj, (unsigned int) reloc->offset,
 -                        readl(reloc_entry), reloc_val);
 -#endif
                writel(reloc_val, reloc_entry);
                io_mapping_unmap_atomic(reloc_page, KM_USER0);
  
                drm_gem_object_unreference(target_obj);
        }
  
 -#if WATCH_BUF
 -      if (0)
 -              i915_gem_dump_object(obj, 128, __func__, ~0);
 -#endif
        return 0;
  }
  
   * relatively low latency when blocking on a particular request to finish.
   */
  static int
 -i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
 +i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
  {
 -      struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
 -      int ret = 0;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_file_private *file_priv = file->driver_priv;
        unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
 +      struct drm_i915_gem_request *request;
 +      struct intel_ring_buffer *ring = NULL;
 +      u32 seqno = 0;
 +      int ret;
  
 -      mutex_lock(&dev->struct_mutex);
 -      while (!list_empty(&i915_file_priv->mm.request_list)) {
 -              struct drm_i915_gem_request *request;
 -
 -              request = list_first_entry(&i915_file_priv->mm.request_list,
 -                                         struct drm_i915_gem_request,
 -                                         client_list);
 -
 +      spin_lock(&file_priv->mm.lock);
 +      list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
                if (time_after_eq(request->emitted_jiffies, recent_enough))
                        break;
  
 -              ret = i915_wait_request(dev, request->seqno, request->ring);
 -              if (ret != 0)
 -                      break;
 +              ring = request->ring;
 +              seqno = request->seqno;
        }
 -      mutex_unlock(&dev->struct_mutex);
 +      spin_unlock(&file_priv->mm.lock);
 +
 +      if (seqno == 0)
 +              return 0;
 +
 +      ret = 0;
 +      if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
 +              /* And wait for the seqno passing without holding any locks and
 +               * causing extra latency for others. This is safe as the irq
 +               * generation is designed to be run atomically and so is
 +               * lockless.
 +               */
 +              ring->user_irq_get(dev, ring);
 +              ret = wait_event_interruptible(ring->irq_queue,
 +                                             i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
 +                                             || atomic_read(&dev_priv->mm.wedged));
 +              ring->user_irq_put(dev, ring);
 +
 +              if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
 +                      ret = -EIO;
 +      }
 +
 +      if (ret == 0)
 +              queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
  
        return ret;
  }
@@@ -3683,7 -3537,8 +3681,7 @@@ i915_gem_wait_for_pending_flip(struct d
        return ret;
  }
  
 -
 -int
 +static int
  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file_priv,
                       struct drm_i915_gem_execbuffer2 *args,
        struct drm_i915_gem_object *obj_priv;
        struct drm_clip_rect *cliprects = NULL;
        struct drm_i915_gem_relocation_entry *relocs = NULL;
 -      int ret = 0, ret2, i, pinned = 0;
 +      struct drm_i915_gem_request *request = NULL;
 +      int ret, ret2, i, pinned = 0;
        uint64_t exec_offset;
 -      uint32_t seqno, flush_domains, reloc_index;
 +      uint32_t reloc_index;
        int pin_tries, flips;
  
        struct intel_ring_buffer *ring = NULL;
  
 +      ret = i915_gem_check_is_wedged(dev);
 +      if (ret)
 +              return ret;
 +
  #if WATCH_EXEC
        DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
                  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
                }
        }
  
 +      request = kzalloc(sizeof(*request), GFP_KERNEL);
 +      if (request == NULL) {
 +              ret = -ENOMEM;
 +              goto pre_mutex_err;
 +      }
 +
        ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
                                            &relocs);
        if (ret != 0)
                goto pre_mutex_err;
  
 -      mutex_lock(&dev->struct_mutex);
 -
 -      i915_verify_inactive(dev, __FILE__, __LINE__);
 -
 -      if (atomic_read(&dev_priv->mm.wedged)) {
 -              mutex_unlock(&dev->struct_mutex);
 -              ret = -EIO;
 +      ret = i915_mutex_lock_interruptible(dev);
 +      if (ret)
                goto pre_mutex_err;
 -      }
  
        if (dev_priv->mm.suspended) {
                mutex_unlock(&dev->struct_mutex);
                                          pinned+1, args->buffer_count,
                                          total_size, num_fences,
                                          ret);
 -                              DRM_ERROR("%d objects [%d pinned], "
 -                                        "%d object bytes [%d pinned], "
 -                                        "%d/%d gtt bytes\n",
 -                                        atomic_read(&dev->object_count),
 -                                        atomic_read(&dev->pin_count),
 -                                        atomic_read(&dev->object_memory),
 -                                        atomic_read(&dev->pin_memory),
 -                                        atomic_read(&dev->gtt_memory),
 -                                        dev->gtt_total);
 +                              DRM_ERROR("%u objects [%u pinned, %u GTT], "
 +                                        "%zu object bytes [%zu pinned], "
 +                                        "%zu /%zu gtt bytes\n",
 +                                        dev_priv->mm.object_count,
 +                                        dev_priv->mm.pin_count,
 +                                        dev_priv->mm.gtt_count,
 +                                        dev_priv->mm.object_memory,
 +                                        dev_priv->mm.pin_memory,
 +                                        dev_priv->mm.gtt_memory,
 +                                        dev_priv->mm.gtt_total);
                        }
                        goto err;
                }
                goto err;
        }
  
 -      i915_verify_inactive(dev, __FILE__, __LINE__);
 -
        /* Zero the global flush/invalidate flags. These
         * will be modified as new domains are computed
         * for each object
         */
        dev->invalidate_domains = 0;
        dev->flush_domains = 0;
 -      dev_priv->flush_rings = 0;
 +      dev_priv->mm.flush_rings = 0;
  
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
                i915_gem_object_set_to_gpu_domain(obj);
        }
  
 -      i915_verify_inactive(dev, __FILE__, __LINE__);
 -
        if (dev->invalidate_domains | dev->flush_domains) {
  #if WATCH_EXEC
                DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
                         dev->invalidate_domains,
                         dev->flush_domains);
  #endif
 -              i915_gem_flush(dev,
 +              i915_gem_flush(dev, file_priv,
                               dev->invalidate_domains,
 -                             dev->flush_domains);
 -              if (dev_priv->flush_rings & FLUSH_RENDER_RING)
 -                      (void)i915_add_request(dev, file_priv,
 -                                             dev->flush_domains,
 -                                             &dev_priv->render_ring);
 -              if (dev_priv->flush_rings & FLUSH_BSD_RING)
 -                      (void)i915_add_request(dev, file_priv,
 -                                             dev->flush_domains,
 -                                             &dev_priv->bsd_ring);
 +                             dev->flush_domains,
 +                             dev_priv->mm.flush_rings);
        }
  
        for (i = 0; i < args->buffer_count; i++) {
                if (obj->write_domain)
                        list_move_tail(&obj_priv->gpu_write_list,
                                       &dev_priv->mm.gpu_write_list);
 -              else
 -                      list_del_init(&obj_priv->gpu_write_list);
  
                trace_i915_gem_object_change_domain(obj,
                                                    obj->read_domains,
                                                    old_write_domain);
        }
  
 -      i915_verify_inactive(dev, __FILE__, __LINE__);
 -
  #if WATCH_COHERENCY
        for (i = 0; i < args->buffer_count; i++) {
                i915_gem_object_check_coherency(object_list[i],
         * Ensure that the commands in the batch buffer are
         * finished before the interrupt fires
         */
 -      flush_domains = i915_retire_commands(dev, ring);
 -
 -      i915_verify_inactive(dev, __FILE__, __LINE__);
 +      i915_retire_commands(dev, ring);
  
 -      /*
 -       * Get a seqno representing the execution of the current buffer,
 -       * which we can wait on.  We would like to mitigate these interrupts,
 -       * likely by only creating seqnos occasionally (so that we have
 -       * *some* interrupts representing completion of buffers that we can
 -       * wait on when trying to clear up gtt space).
 -       */
 -      seqno = i915_add_request(dev, file_priv, flush_domains, ring);
 -      BUG_ON(seqno == 0);
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
                obj_priv = to_intel_bo(obj);
  
 -              i915_gem_object_move_to_active(obj, seqno, ring);
 -#if WATCH_LRU
 -              DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 -#endif
 +              i915_gem_object_move_to_active(obj, ring);
        }
 -#if WATCH_LRU
 -      i915_dump_lru(dev, __func__);
 -#endif
  
 -      i915_verify_inactive(dev, __FILE__, __LINE__);
 +      i915_add_request(dev, file_priv, request, ring);
 +      request = NULL;
  
  err:
        for (i = 0; i < pinned; i++)
@@@ -4000,7 -3880,6 +3998,7 @@@ pre_mutex_err
  
        drm_free_large(object_list);
        kfree(cliprects);
 +      kfree(request);
  
        return ret;
  }
@@@ -4057,7 -3936,7 +4055,7 @@@ i915_gem_execbuffer(struct drm_device *
                exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
                exec2_list[i].alignment = exec_list[i].alignment;
                exec2_list[i].offset = exec_list[i].offset;
 -              if (!IS_I965G(dev))
 +              if (INTEL_INFO(dev)->gen < 4)
                        exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
                else
                        exec2_list[i].flags = 0;
  i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
  {
        struct drm_device *dev = obj->dev;
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret;
  
        BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
 -
 -      i915_verify_inactive(dev, __FILE__, __LINE__);
 +      WARN_ON(i915_verify_lists(dev));
  
        if (obj_priv->gtt_space != NULL) {
                if (alignment == 0)
         * remove it from the inactive list
         */
        if (obj_priv->pin_count == 1) {
 -              atomic_inc(&dev->pin_count);
 -              atomic_add(obj->size, &dev->pin_memory);
 -              if (!obj_priv->active &&
 -                  (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
 -                      list_del_init(&obj_priv->list);
 +              i915_gem_info_add_pin(dev_priv, obj->size);
 +              if (!obj_priv->active)
 +                      list_move_tail(&obj_priv->list,
 +                                     &dev_priv->mm.pinned_list);
        }
 -      i915_verify_inactive(dev, __FILE__, __LINE__);
  
 +      WARN_ON(i915_verify_lists(dev));
        return 0;
  }
  
@@@ -4204,7 -4084,7 +4202,7 @@@ i915_gem_object_unpin(struct drm_gem_ob
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
  
 -      i915_verify_inactive(dev, __FILE__, __LINE__);
 +      WARN_ON(i915_verify_lists(dev));
        obj_priv->pin_count--;
        BUG_ON(obj_priv->pin_count < 0);
        BUG_ON(obj_priv->gtt_space == NULL);
         * the inactive list
         */
        if (obj_priv->pin_count == 0) {
 -              if (!obj_priv->active &&
 -                  (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
 +              if (!obj_priv->active)
                        list_move_tail(&obj_priv->list,
                                       &dev_priv->mm.inactive_list);
 -              atomic_dec(&dev->pin_count);
 -              atomic_sub(obj->size, &dev->pin_memory);
 +              i915_gem_info_remove_pin(dev_priv, obj->size);
        }
 -      i915_verify_inactive(dev, __FILE__, __LINE__);
 +      WARN_ON(i915_verify_lists(dev));
  }
  
  int
@@@ -4231,20 -4113,17 +4229,20 @@@ i915_gem_pin_ioctl(struct drm_device *d
        struct drm_i915_gem_object *obj_priv;
        int ret;
  
 -      mutex_lock(&dev->struct_mutex);
 -
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
                DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
 -              mutex_unlock(&dev->struct_mutex);
                return -ENOENT;
        }
        obj_priv = to_intel_bo(obj);
  
 +      ret = i915_mutex_lock_interruptible(dev);
 +      if (ret) {
 +              drm_gem_object_unreference_unlocked(obj);
 +              return ret;
 +      }
 +
        if (obj_priv->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to pin a purgeable buffer\n");
                drm_gem_object_unreference(obj);
@@@ -4289,23 -4168,18 +4287,23 @@@ i915_gem_unpin_ioctl(struct drm_device 
        struct drm_i915_gem_pin *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
 -
 -      mutex_lock(&dev->struct_mutex);
 +      int ret;
  
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
                DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
                          args->handle);
 -              mutex_unlock(&dev->struct_mutex);
                return -ENOENT;
        }
  
        obj_priv = to_intel_bo(obj);
 +
 +      ret = i915_mutex_lock_interruptible(dev);
 +      if (ret) {
 +              drm_gem_object_unreference_unlocked(obj);
 +              return ret;
 +      }
 +
        if (obj_priv->pin_filp != file_priv) {
                DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
@@@ -4331,7 -4205,6 +4329,7 @@@ i915_gem_busy_ioctl(struct drm_device *
        struct drm_i915_gem_busy *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
 +      int ret;
  
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
                return -ENOENT;
        }
  
 -      mutex_lock(&dev->struct_mutex);
 +      ret = i915_mutex_lock_interruptible(dev);
 +      if (ret) {
 +              drm_gem_object_unreference_unlocked(obj);
 +              return ret;
 +      }
  
        /* Count all active objects as busy, even if they are currently not used
         * by the gpu. Users of this interface expect objects to eventually
                 * use this buffer rather sooner than later, so issuing the required
                 * flush earlier is beneficial.
                 */
 -              if (obj->write_domain) {
 -                      i915_gem_flush(dev, 0, obj->write_domain);
 -                      (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
 -              }
 +              if (obj->write_domain & I915_GEM_GPU_DOMAINS)
 +                      i915_gem_flush_ring(dev, file_priv,
 +                                          obj_priv->ring,
 +                                          0, obj->write_domain);
  
                /* Update the active list for the hardware's current position.
                 * Otherwise this only updates on a delayed timer or when irqs
@@@ -4393,7 -4262,6 +4391,7 @@@ i915_gem_madvise_ioctl(struct drm_devic
        struct drm_i915_gem_madvise *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
 +      int ret;
  
        switch (args->madv) {
        case I915_MADV_DONTNEED:
                          args->handle);
                return -ENOENT;
        }
 -
 -      mutex_lock(&dev->struct_mutex);
        obj_priv = to_intel_bo(obj);
  
 +      ret = i915_mutex_lock_interruptible(dev);
 +      if (ret) {
 +              drm_gem_object_unreference_unlocked(obj);
 +              return ret;
 +      }
 +
        if (obj_priv->pin_count) {
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
  struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
                                              size_t size)
  {
 +      struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
  
        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
                return NULL;
        }
  
 +      i915_gem_info_add_obj(dev_priv, size);
 +
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
  
@@@ -4498,7 -4359,6 +4496,7 @@@ static void i915_gem_free_object_tail(s
                i915_gem_free_mmap_offset(obj);
  
        drm_gem_object_release(obj);
 +      i915_gem_info_remove_obj(dev_priv, obj->size);
  
        kfree(obj_priv->page_cpu_valid);
        kfree(obj_priv->bit_17);
@@@ -4557,7 -4417,7 +4555,7 @@@ i915_gem_idle(struct drm_device *dev
         * And not confound mm.suspended!
         */
        dev_priv->mm.suspended = 1;
 -      del_timer(&dev_priv->hangcheck_timer);
 +      del_timer_sync(&dev_priv->hangcheck_timer);
  
        i915_kernel_lost_context(dev);
        i915_gem_cleanup_ringbuffer(dev);
@@@ -4637,18 -4497,28 +4635,18 @@@ i915_gem_init_ringbuffer(struct drm_dev
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
  
 -      dev_priv->render_ring = render_ring;
 -
 -      if (!I915_NEED_GFX_HWS(dev)) {
 -              dev_priv->render_ring.status_page.page_addr
 -                      = dev_priv->status_page_dmah->vaddr;
 -              memset(dev_priv->render_ring.status_page.page_addr,
 -                              0, PAGE_SIZE);
 -      }
 -
        if (HAS_PIPE_CONTROL(dev)) {
                ret = i915_gem_init_pipe_control(dev);
                if (ret)
                        return ret;
        }
  
 -      ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
 +      ret = intel_init_render_ring_buffer(dev);
        if (ret)
                goto cleanup_pipe_control;
  
        if (HAS_BSD(dev)) {
 -              dev_priv->bsd_ring = bsd_ring;
 -              ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
 +              ret = intel_init_bsd_ring_buffer(dev);
                if (ret)
                        goto cleanup_render_ring;
        }
@@@ -4701,8 -4571,11 +4699,8 @@@ i915_gem_entervt_ioctl(struct drm_devic
                return ret;
        }
  
 -      spin_lock(&dev_priv->mm.active_list_lock);
        BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
        BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
 -      spin_unlock(&dev_priv->mm.active_list_lock);
 -
        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
        BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
        BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
@@@ -4754,10 -4627,10 +4752,10 @@@ i915_gem_load(struct drm_device *dev
        int i;
        drm_i915_private_t *dev_priv = dev->dev_private;
  
 -      spin_lock_init(&dev_priv->mm.active_list_lock);
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 +      INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
        INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
 +      init_completion(&dev_priv->error_completion);
        spin_lock(&shrink_list_lock);
        list_add(&dev_priv->mm.shrink_list, &shrink_list);
        spin_unlock(&shrink_list_lock);
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                dev_priv->fence_reg_start = 3;
  
 -      if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
 +      if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                dev_priv->num_fence_regs = 16;
        else
                dev_priv->num_fence_regs = 8;
  
        /* Initialize fence registers to zero */
 -      if (IS_I965G(dev)) {
 +      switch (INTEL_INFO(dev)->gen) {
 +      case 6:
 +              for (i = 0; i < 16; i++)
 +                      I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
 +              break;
 +      case 5:
 +      case 4:
                for (i = 0; i < 16; i++)
                        I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
 -      } else {
 -              for (i = 0; i < 8; i++)
 -                      I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
 +              break;
 +      case 3:
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                        for (i = 0; i < 8; i++)
                                I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
 +      case 2:
 +              for (i = 0; i < 8; i++)
 +                      I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
 +              break;
        }
        i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);
   * Create a physically contiguous memory object for this object
   * e.g. for cursor + overlay regs
   */
 -int i915_gem_init_phys_object(struct drm_device *dev,
 -                            int id, int size, int align)
 +static int i915_gem_init_phys_object(struct drm_device *dev,
 +                                   int id, int size, int align)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_phys_object *phys_obj;
@@@ -4855,7 -4718,7 +4853,7 @@@ kfree_obj
        return ret;
  }
  
 -void i915_gem_free_phys_object(struct drm_device *dev, int id)
 +static void i915_gem_free_phys_object(struct drm_device *dev, int id)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_phys_object *phys_obj;
@@@ -5000,25 -4863,18 +4998,25 @@@ i915_gem_phys_pwrite(struct drm_device 
        return 0;
  }
  
 -void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
 +void i915_gem_release(struct drm_device *dev, struct drm_file *file)
  {
 -      struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
 +      struct drm_i915_file_private *file_priv = file->driver_priv;
  
        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
         */
 -      mutex_lock(&dev->struct_mutex);
 -      while (!list_empty(&i915_file_priv->mm.request_list))
 -              list_del_init(i915_file_priv->mm.request_list.next);
 -      mutex_unlock(&dev->struct_mutex);
 +      spin_lock(&file_priv->mm.lock);
 +      while (!list_empty(&file_priv->mm.request_list)) {
 +              struct drm_i915_gem_request *request;
 +
 +              request = list_first_entry(&file_priv->mm.request_list,
 +                                         struct drm_i915_gem_request,
 +                                         client_list);
 +              list_del(&request->client_list);
 +              request->file_priv = NULL;
 +      }
 +      spin_unlock(&file_priv->mm.lock);
  }
  
  static int
@@@ -5027,10 -4883,12 +5025,10 @@@ i915_gpu_is_active(struct drm_device *d
        drm_i915_private_t *dev_priv = dev->dev_private;
        int lists_empty;
  
 -      spin_lock(&dev_priv->mm.active_list_lock);
        lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
                      list_empty(&dev_priv->render_ring.active_list);
        if (HAS_BSD(dev))
                lists_empty &= list_empty(&dev_priv->bsd_ring.active_list);
 -      spin_unlock(&dev_priv->mm.active_list_lock);
  
        return !lists_empty;
  }
diff --combined drivers/gpu/drm/i915/intel_fb.c
index 7dc50acd65d7698a4219d3fd325160777c10ddb2,56ad9df2ccb58925bdd74a2f3b64bac01360f562..b937ccfa7bec9a037ae7b6fddf6d635552a4aba1
  #include "i915_drm.h"
  #include "i915_drv.h"
  
 -struct intel_fbdev {
 -      struct drm_fb_helper helper;
 -      struct intel_framebuffer ifb;
 -      struct list_head fbdev_list;
 -      struct drm_display_mode *our_mode;
 -};
 -
  static struct fb_ops intelfb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
@@@ -68,7 -75,7 +68,7 @@@ static int intelfb_create(struct intel_
        struct drm_gem_object *fbo = NULL;
        struct drm_i915_gem_object *obj_priv;
        struct device *device = &dev->pdev->dev;
 -      int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
 +      int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
  
        /* we don't do packed 24bpp */
        if (sizes->surface_bpp == 24)
  
        mutex_lock(&dev->struct_mutex);
  
 -      ret = intel_pin_and_fence_fb_obj(dev, fbo);
 +      /* Flush everything out, we'll be doing GTT only from now on */
 +      ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
        if (ret) {
                DRM_ERROR("failed to pin fb: %d\n", ret);
                goto out_unref;
        }
  
 -      /* Flush everything out, we'll be doing GTT only from now on */
 -      ret = i915_gem_object_set_to_gtt_domain(fbo, 1);
 -      if (ret) {
 -              DRM_ERROR("failed to bind fb: %d.\n", ret);
 -              goto out_unpin;
 -      }
 -
        info = framebuffer_alloc(0, device);
        if (!info) {
                ret = -ENOMEM;
                goto out_unpin;
        }
        info->apertures->ranges[0].base = dev->mode_config.fb_base;
 -      if (IS_I9XX(dev))
 +      if (!IS_GEN2(dev))
                info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
        else
                info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
@@@ -206,8 -219,8 +206,8 @@@ static struct drm_fb_helper_funcs intel
        .fb_probe = intel_fb_find_or_create_single,
  };
  
 -int intel_fbdev_destroy(struct drm_device *dev,
 -                      struct intel_fbdev *ifbdev)
 +static void intel_fbdev_destroy(struct drm_device *dev,
 +                              struct intel_fbdev *ifbdev)
  {
        struct fb_info *info;
        struct intel_framebuffer *ifb = &ifbdev->ifb;
        drm_fb_helper_fini(&ifbdev->helper);
  
        drm_framebuffer_cleanup(&ifb->base);
-       if (ifb->obj)
+       if (ifb->obj) {
 -              drm_gem_object_handle_unreference(ifb->obj);
 -              drm_gem_object_unreference(ifb->obj);
++              drm_gem_object_handle_unreference_unlocked(ifb->obj);
 +              drm_gem_object_unreference_unlocked(ifb->obj);
+       }
 -
 -      return 0;
  }
  
  int intel_fbdev_init(struct drm_device *dev)
diff --combined drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e645f44e43020c45984e576bf64b4f767ada9a9b,a96ed6d9d010b82cfc58ed41ec6240f99d5a9103..5c845b6ec4920806b49e96d5f91c49f3f0fff511
@@@ -148,13 -148,16 +148,16 @@@ static struct pci_device_id vmw_pci_id_
        {0, 0, 0}
  };
  
- static char *vmw_devname = "vmwgfx";
+ static int enable_fbdev;
  
  static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
  static void vmw_master_init(struct vmw_master *);
  static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr);
  
+ MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
+ module_param_named(enable_fbdev, enable_fbdev, int, 0600);
  static void vmw_print_capabilities(uint32_t capabilities)
  {
        DRM_INFO("Capabilities:\n");
@@@ -192,8 -195,6 +195,6 @@@ static int vmw_request_device(struct vm
  {
        int ret;
  
-       vmw_kms_save_vga(dev_priv);
        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
  static void vmw_release_device(struct vmw_private *dev_priv)
  {
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
-       vmw_kms_restore_vga(dev_priv);
  }
  
+ int vmw_3d_resource_inc(struct vmw_private *dev_priv)
+ {
+       int ret = 0;
+       mutex_lock(&dev_priv->release_mutex);
+       if (unlikely(dev_priv->num_3d_resources++ == 0)) {
+               ret = vmw_request_device(dev_priv);
+               if (unlikely(ret != 0))
+                       --dev_priv->num_3d_resources;
+       }
+       mutex_unlock(&dev_priv->release_mutex);
+       return ret;
+ }
+ void vmw_3d_resource_dec(struct vmw_private *dev_priv)
+ {
+       int32_t n3d;
+       mutex_lock(&dev_priv->release_mutex);
+       if (unlikely(--dev_priv->num_3d_resources == 0))
+               vmw_release_device(dev_priv);
+       n3d = (int32_t) dev_priv->num_3d_resources;
+       mutex_unlock(&dev_priv->release_mutex);
+       BUG_ON(n3d < 0);
+ }
  
  static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
  {
        dev_priv->last_read_sequence = (uint32_t) -100;
        mutex_init(&dev_priv->hw_mutex);
        mutex_init(&dev_priv->cmdbuf_mutex);
+       mutex_init(&dev_priv->release_mutex);
        rwlock_init(&dev_priv->resource_lock);
        idr_init(&dev_priv->context_idr);
        idr_init(&dev_priv->surface_idr);
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
  
+       dev_priv->enable_fb = enable_fbdev;
        mutex_lock(&dev_priv->hw_mutex);
  
        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
  
        dev->dev_private = dev_priv;
  
-       if (!dev->devname)
-               dev->devname = vmw_devname;
-       if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
-               ret = drm_irq_install(dev);
-               if (unlikely(ret != 0)) {
-                       DRM_ERROR("Failed installing irq: %d\n", ret);
-                       goto out_no_irq;
-               }
-       }
        ret = pci_request_regions(dev->pdev, "vmwgfx probe");
        dev_priv->stealth = (ret != 0);
        if (dev_priv->stealth) {
                        goto out_no_device;
                }
        }
-       ret = vmw_request_device(dev_priv);
+       ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
-               goto out_no_device;
-       vmw_kms_init(dev_priv);
+               goto out_no_kms;
        vmw_overlay_init(dev_priv);
-       vmw_fb_init(dev_priv);
+       if (dev_priv->enable_fb) {
+               ret = vmw_3d_resource_inc(dev_priv);
+               if (unlikely(ret != 0))
+                       goto out_no_fifo;
+               vmw_kms_save_vga(dev_priv);
+               vmw_fb_init(dev_priv);
+               DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
+                        "Detected device 3D availability.\n" :
+                        "Detected no device 3D availability.\n");
+       } else {
+               DRM_INFO("Delayed 3D detection since we're not "
+                        "running the device in SVGA mode yet.\n");
+       }
+       if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
+               ret = drm_irq_install(dev);
+               if (unlikely(ret != 0)) {
+                       DRM_ERROR("Failed installing irq: %d\n", ret);
+                       goto out_no_irq;
+               }
+       }
  
        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);
  
-       DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
        return 0;
  
- out_no_device:
-       if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-               drm_irq_uninstall(dev_priv->dev);
-       if (dev->devname == vmw_devname)
-               dev->devname = NULL;
  out_no_irq:
+       if (dev_priv->enable_fb) {
+               vmw_fb_close(dev_priv);
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv);
+       }
+ out_no_fifo:
+       vmw_overlay_close(dev_priv);
+       vmw_kms_close(dev_priv);
+ out_no_kms:
+       if (dev_priv->stealth)
+               pci_release_region(dev->pdev, 2);
+       else
+               pci_release_regions(dev->pdev);
+ out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
  out_err4:
        iounmap(dev_priv->mmio_virt);
@@@ -415,19 -460,20 +460,20 @@@ static int vmw_driver_unload(struct drm
  
        unregister_pm_notifier(&dev_priv->pm_nb);
  
-       vmw_fb_close(dev_priv);
+       if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+               drm_irq_uninstall(dev_priv->dev);
+       if (dev_priv->enable_fb) {
+               vmw_fb_close(dev_priv);
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv);
+       }
        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);
-       vmw_release_device(dev_priv);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);
  
-       if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-               drm_irq_uninstall(dev_priv->dev);
-       if (dev->devname == vmw_devname)
-               dev->devname = NULL;
        ttm_object_device_release(&dev_priv->tdev);
        iounmap(dev_priv->mmio_virt);
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
@@@ -500,7 -546,7 +546,7 @@@ static long vmw_unlocked_ioctl(struct f
                struct drm_ioctl_desc *ioctl =
                    &vmw_ioctls[nr - DRM_COMMAND_BASE];
  
-               if (unlikely(ioctl->cmd != cmd)) {
+               if (unlikely(ioctl->cmd_drv != cmd)) {
                        DRM_ERROR("Invalid command format, ioctl %d\n",
                                  nr - DRM_COMMAND_BASE);
                        return -EINVAL;
@@@ -589,6 -635,16 +635,16 @@@ static int vmw_master_set(struct drm_de
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;
  
+       if (!dev_priv->enable_fb) {
+               ret = vmw_3d_resource_inc(dev_priv);
+               if (unlikely(ret != 0))
+                       return ret;
+               vmw_kms_save_vga(dev_priv);
+               mutex_lock(&dev_priv->hw_mutex);
+               vmw_write(dev_priv, SVGA_REG_TRACES, 0);
+               mutex_unlock(&dev_priv->hw_mutex);
+       }
        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
        return 0;
  
  out_no_active_lock:
-       vmw_release_device(dev_priv);
+       if (!dev_priv->enable_fb) {
+               mutex_lock(&dev_priv->hw_mutex);
+               vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+               mutex_unlock(&dev_priv->hw_mutex);
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv);
+       }
        return ret;
  }
  
@@@ -645,11 -707,23 +707,23 @@@ static void vmw_master_drop(struct drm_
  
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
  
+       if (!dev_priv->enable_fb) {
+               ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
+               if (unlikely(ret != 0))
+                       DRM_ERROR("Unable to clean VRAM on master drop.\n");
+               mutex_lock(&dev_priv->hw_mutex);
+               vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+               mutex_unlock(&dev_priv->hw_mutex);
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv);
+       }
        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);
  
-       vmw_fb_on(dev_priv);
+       if (dev_priv->enable_fb)
+               vmw_fb_on(dev_priv);
  }
  
  
@@@ -722,7 -796,10 +796,8 @@@ static struct drm_driver driver = 
        .irq_postinstall = vmw_irq_postinstall,
        .irq_uninstall = vmw_irq_uninstall,
        .irq_handler = vmw_irq_handler,
+       .get_vblank_counter = vmw_get_vblank_counter,
        .reclaim_buffers_locked = NULL,
 -      .get_map_ofs = drm_core_get_map_ofs,
 -      .get_reg_ofs = drm_core_get_reg_ofs,
        .ioctls = vmw_ioctls,
        .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
        .dma_quiescent = NULL,  /*vmw_dma_quiescent, */
diff --combined include/drm/drmP.h
index bb5c41893c00f6f31d4742cb9e6824af60d6b241,4c9461a4f9e67b4b3e67c5bb73192aed44695079..274eaaa15c36a86ee48a6b743f6c9d7cfb36b010
@@@ -612,7 -612,7 +612,7 @@@ struct drm_gem_object 
        struct kref refcount;
  
        /** Handle count of this object. Each handle also holds a reference */
-       struct kref handlecount;
+       atomic_t handle_count; /* number of handles on this object */
  
        /** Related drm device */
        struct drm_device *dev;
@@@ -699,8 -699,13 +699,8 @@@ struct drm_driver 
        int (*suspend) (struct drm_device *, pm_message_t state);
        int (*resume) (struct drm_device *);
        int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
 -      void (*dma_ready) (struct drm_device *);
        int (*dma_quiescent) (struct drm_device *);
 -      int (*context_ctor) (struct drm_device *dev, int context);
        int (*context_dtor) (struct drm_device *dev, int context);
 -      int (*kernel_context_switch) (struct drm_device *dev, int old,
 -                                    int new);
 -      void (*kernel_context_switch_unlock) (struct drm_device *dev);
  
        /**
         * get_vblank_counter - get raw hardware vblank counter
                                        struct drm_file *file_priv);
        void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
                                            struct drm_file *file_priv);
 -      resource_size_t (*get_map_ofs) (struct drm_local_map * map);
 -      resource_size_t (*get_reg_ofs) (struct drm_device *dev);
        void (*set_version) (struct drm_device *dev,
                             struct drm_set_version *sv);
  
        void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
                            bool from_release);
  
 -      int (*proc_init)(struct drm_minor *minor);
 -      void (*proc_cleanup)(struct drm_minor *minor);
        int (*debugfs_init)(struct drm_minor *minor);
        void (*debugfs_cleanup)(struct drm_minor *minor);
  
@@@ -963,6 -972,7 +963,6 @@@ struct drm_device 
        __volatile__ long context_flag; /**< Context swapping flag */
        __volatile__ long interrupt_flag; /**< Interruption handler flag */
        __volatile__ long dma_flag;     /**< DMA dispatch flag */
 -      struct timer_list timer;        /**< Timer for delaying ctx switch */
        wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
        int last_checked;               /**< Last context checked for DMA */
        int last_context;               /**< Last current context */
        struct drm_minor *control;              /**< Control node for card */
        struct drm_minor *primary;              /**< render type primary screen head */
  
 -      /** \name Drawable information */
 -      /*@{ */
 -      spinlock_t drw_lock;
 -      struct idr drw_idr;
 -      /*@} */
 -
          struct drm_mode_config mode_config;   /**< Current mode config */
  
        /** \name GEM information */
        /*@{ */
        spinlock_t object_name_lock;
        struct idr object_name_idr;
 -      atomic_t object_count;
 -      atomic_t object_memory;
 -      atomic_t pin_count;
 -      atomic_t pin_memory;
 -      atomic_t gtt_count;
 -      atomic_t gtt_memory;
 -      uint32_t gtt_total;
        uint32_t invalidate_domains;    /* domains pending invalidation */
        uint32_t flush_domains;         /* domains pending flush */
        /*@} */
@@@ -1151,6 -1174,9 +1151,7 @@@ extern int drm_release(struct inode *in
  extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
  extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
  extern void drm_vm_open_locked(struct vm_area_struct *vma);
 -extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map);
 -extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev);
+ extern void drm_vm_close_locked(struct vm_area_struct *vma);
  extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
  
                                /* Memory management support (drm_memory.h) */
@@@ -1160,7 -1186,8 +1161,7 @@@ extern int drm_mem_info(char *buf, cha
                        int request, int *eof, void *data);
  extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
  
 -extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
 -extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
 +extern void drm_free_agp(DRM_AGP_MEM * handle, int pages);
  extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
  extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
                                       struct page **pages,
@@@ -1212,6 -1239,17 +1213,6 @@@ extern int drm_setsareactx(struct drm_d
  extern int drm_getsareactx(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
  
 -                              /* Drawable IOCTL support (drm_drawable.h) */
 -extern int drm_adddraw(struct drm_device *dev, void *data,
 -                     struct drm_file *file_priv);
 -extern int drm_rmdraw(struct drm_device *dev, void *data,
 -                    struct drm_file *file_priv);
 -extern int drm_update_drawable_info(struct drm_device *dev, void *data,
 -                                  struct drm_file *file_priv);
 -extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
 -                                                drm_drawable_t id);
 -extern void drm_drawable_free_all(struct drm_device *dev);
 -
                                /* Authentication IOCTL support (drm_auth.h) */
  extern int drm_getmagic(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
@@@ -1226,6 -1264,7 +1227,6 @@@ extern int drm_lock(struct drm_device *
                    struct drm_file *file_priv);
  extern int drm_unlock(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
 -extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
  extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
  extern void drm_idlelock_take(struct drm_lock_data *lock_data);
  extern void drm_idlelock_release(struct drm_lock_data *lock_data);
@@@ -1320,6 -1359,10 +1321,6 @@@ extern int drm_agp_unbind_ioctl(struct 
  extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
  extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 -extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type);
 -extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
 -extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
 -extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
  extern void drm_agp_chipset_flush(struct drm_device *dev);
  
                                /* Stub support (drm_stub.h) */
@@@ -1371,6 -1414,7 +1372,6 @@@ extern int drm_bufs_info(struct seq_fil
  extern int drm_vblank_info(struct seq_file *m, void *data);
  extern int drm_clients_info(struct seq_file *m, void* data);
  extern int drm_gem_name_info(struct seq_file *m, void *data);
 -extern int drm_gem_object_info(struct seq_file *m, void* data);
  
  #if DRM_DEBUG_CODE
  extern int drm_vma_info(struct seq_file *m, void *data);
@@@ -1411,12 -1455,11 +1412,11 @@@ int drm_gem_init(struct drm_device *dev
  void drm_gem_destroy(struct drm_device *dev);
  void drm_gem_object_release(struct drm_gem_object *obj);
  void drm_gem_object_free(struct kref *kref);
- void drm_gem_object_free_unlocked(struct kref *kref);
  struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
                                            size_t size);
  int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size);
- void drm_gem_object_handle_free(struct kref *kref);
+ void drm_gem_object_handle_free(struct drm_gem_object *obj);
  void drm_gem_vm_open(struct vm_area_struct *vma);
  void drm_gem_vm_close(struct vm_area_struct *vma);
  int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
@@@ -1439,8 -1482,12 +1439,12 @@@ drm_gem_object_unreference(struct drm_g
  static inline void
  drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
  {
-       if (obj != NULL)
-               kref_put(&obj->refcount, drm_gem_object_free_unlocked);
+       if (obj != NULL) {
+               struct drm_device *dev = obj->dev;
+               mutex_lock(&dev->struct_mutex);
+               kref_put(&obj->refcount, drm_gem_object_free);
+               mutex_unlock(&dev->struct_mutex);
+       }
  }
  
  int drm_gem_handle_create(struct drm_file *file_priv,
@@@ -1451,7 -1498,7 +1455,7 @@@ static inline voi
  drm_gem_object_handle_reference(struct drm_gem_object *obj)
  {
        drm_gem_object_reference(obj);
-       kref_get(&obj->handlecount);
+       atomic_inc(&obj->handle_count);
  }
  
  static inline void
@@@ -1460,12 -1507,15 +1464,15 @@@ drm_gem_object_handle_unreference(struc
        if (obj == NULL)
                return;
  
+       if (atomic_read(&obj->handle_count) == 0)
+               return;
        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name
         */
-       kref_put(&obj->handlecount, drm_gem_object_handle_free);
+       if (atomic_dec_and_test(&obj->handle_count))
+               drm_gem_object_handle_free(obj);
        drm_gem_object_unreference(obj);
  }
  
@@@ -1475,12 -1525,17 +1482,17 @@@ drm_gem_object_handle_unreference_unloc
        if (obj == NULL)
                return;
  
+       if (atomic_read(&obj->handle_count) == 0)
+               return;
        /*
        * Must bump handle count first as this may be the last
        * ref, in which case the object would disappear before we
        * checked for a name
        */
-       kref_put(&obj->handlecount, drm_gem_object_handle_free);
+       if (atomic_dec_and_test(&obj->handle_count))
+               drm_gem_object_handle_free(obj);
        drm_gem_object_unreference_unlocked(obj);
  }