Merge remote-tracking branch 'airlied/drm-next' into drm-intel-next
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cf57276ce9aa366cbbe571758f6137a74ca1a64c..13c885d663833332181750ea8b22559a477f4161 100644
@@ -971,6 +971,25 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
        return ret;
 }
 
+static void fake_irq(unsigned long data)
+{
+       wake_up_process((struct task_struct *)data);
+}
+
+static bool missed_irq(struct drm_i915_private *dev_priv,
+                      struct intel_ring_buffer *ring)
+{
+       return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+}
+
+static bool can_wait_boost(struct drm_i915_file_private *file_priv)
+{
+       if (file_priv == NULL)
+               return true;
+
+       return !atomic_xchg(&file_priv->rps_wait_boost, true);
+}
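
A note on the new helper: can_wait_boost() relies on atomic_xchg()
returning the *previous* value, so only the first stalled wait per
client claims the RPS boost; later waits are refused until the flag is
cleared again. A minimal stand-alone sketch of that once-only gate
(hypothetical names, not part of the patch):

    #include <linux/atomic.h>

    static atomic_t boost_claimed = ATOMIC_INIT(0);

    /* Returns true exactly once until release_boost() is called:
     * atomic_xchg() swaps in 1 and hands back the old value, so
     * every caller after the first sees 1 and is turned away. */
    static bool try_claim_boost(void)
    {
            return !atomic_xchg(&boost_claimed, 1);
    }

    static void release_boost(void)
    {
            atomic_set(&boost_claimed, 0);
    }

In the patch itself the release happens from the per-file idle work,
roughly 100ms after the client's last wait (see i915_gem_open() and
i915_gem_file_idle_work_handler() further down).
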
+
 /**
  * __wait_seqno - wait until execution of seqno has finished
  * @ring: the ring expected to report seqno
@@ -991,13 +1010,14 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
  */
 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                        unsigned reset_counter,
-                       bool interruptible, struct timespec *timeout)
+                       bool interruptible,
+                       struct timespec *timeout,
+                       struct drm_i915_file_private *file_priv)
 {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
-       struct timespec before, now, wait_time={1,0};
-       unsigned long timeout_jiffies;
-       long end;
-       bool wait_forever = true;
+       struct timespec before, now;
+       DEFINE_WAIT(wait);
+       long timeout_jiffies;
        int ret;
 
        WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
@@ -1005,51 +1025,79 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
                return 0;
 
-       trace_i915_gem_request_wait_begin(ring, seqno);
+       timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
 
-       if (timeout != NULL) {
-               wait_time = *timeout;
-               wait_forever = false;
+       if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
+               gen6_rps_boost(dev_priv);
+               if (file_priv)
+                       mod_delayed_work(dev_priv->wq,
+                                        &file_priv->mm.idle_work,
+                                        msecs_to_jiffies(100));
        }
 
-       timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
-
-       if (WARN_ON(!ring->irq_get(ring)))
+       if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
+           WARN_ON(!ring->irq_get(ring)))
                return -ENODEV;
 
-       /* Record current time in case interrupted by signal, or wedged * */
+       /* Record current time in case interrupted by signal, or wedged */
+       trace_i915_gem_request_wait_begin(ring, seqno);
        getrawmonotonic(&before);
+       for (;;) {
+               struct timer_list timer;
+               unsigned long expire;
 
-#define EXIT_COND \
-       (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
-        i915_reset_in_progress(&dev_priv->gpu_error) || \
-        reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
-       do {
-               if (interruptible)
-                       end = wait_event_interruptible_timeout(ring->irq_queue,
-                                                              EXIT_COND,
-                                                              timeout_jiffies);
-               else
-                       end = wait_event_timeout(ring->irq_queue, EXIT_COND,
-                                                timeout_jiffies);
+               prepare_to_wait(&ring->irq_queue, &wait,
+                               interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
 
                /* We need to check whether any gpu reset happened in between
                 * the caller grabbing the seqno and now ... */
-               if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
-                       end = -EAGAIN;
+               if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
+                       /* ... but upgrade the -EAGAIN to an -EIO if the gpu
+                        * is truly gone. */
+                       ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+                       if (ret == 0)
+                               ret = -EAGAIN;
+                       break;
+               }
 
-               /* ... but upgrade the -EGAIN to an -EIO if the gpu is truely
-                * gone. */
-               ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
-               if (ret)
-                       end = ret;
-       } while (end == 0 && wait_forever);
+               if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+                       ret = 0;
+                       break;
+               }
+
+               if (interruptible && signal_pending(current)) {
+                       ret = -ERESTARTSYS;
+                       break;
+               }
+
+               if (timeout_jiffies <= 0) {
+                       ret = -ETIME;
+                       break;
+               }
+
+               timer.function = NULL;
+               if (timeout || missed_irq(dev_priv, ring)) {
+                       setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
+                       expire = jiffies + (missed_irq(dev_priv, ring) ? 1 : timeout_jiffies);
+                       mod_timer(&timer, expire);
+               }
+
+               schedule();
 
+               if (timeout)
+                       timeout_jiffies = expire - jiffies;
+
+               if (timer.function) {
+                       del_singleshot_timer_sync(&timer);
+                       destroy_timer_on_stack(&timer);
+               }
+       }
        getrawmonotonic(&now);
+       trace_i915_gem_request_wait_end(ring, seqno);
 
        ring->irq_put(ring);
-       trace_i915_gem_request_wait_end(ring, seqno);
-#undef EXIT_COND
+
+       finish_wait(&ring->irq_queue, &wait);
 
        if (timeout) {
                struct timespec sleep_time = timespec_sub(now, before);
@@ -1058,17 +1106,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                        set_normalized_timespec(timeout, 0, 0);
        }
 
-       switch (end) {
-       case -EIO:
-       case -EAGAIN: /* Wedged */
-       case -ERESTARTSYS: /* Signal */
-               return (int)end;
-       case 0: /* Timeout */
-               return -ETIME;
-       default: /* Completed */
-               WARN_ON(end < 0); /* We're not aware of other errors */
-               return 0;
-       }
+       return ret;
 }
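
The rewrite above replaces wait_event_interruptible_timeout() with an
open-coded prepare_to_wait()/schedule()/finish_wait() loop so that the
extra exit conditions (reset-counter changes, signals, the missed-IRQ
watchdog) can each be re-tested on every wakeup. A stripped-down sketch
of the underlying pattern (hypothetical wait_for_condition(), not from
the patch):

    #include <linux/sched.h>
    #include <linux/wait.h>

    static int wait_for_condition(wait_queue_head_t *wq, bool (*done)(void))
    {
            DEFINE_WAIT(wait);
            int ret = 0;

            for (;;) {
                    /* Queue ourselves *before* re-testing the condition,
                     * closing the race where the wakeup fires between
                     * the test and the sleep. */
                    prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);

                    if (done())
                            break;

                    if (signal_pending(current)) {
                            ret = -ERESTARTSYS;
                            break;
                    }

                    schedule();     /* sleep until woken (possibly spuriously) */
            }
            finish_wait(wq, &wait);

            return ret;
    }

The on-stack timer in __wait_seqno() exists because the condition can
become true without any wakeup when an interrupt is missed: fake_irq()
simply wake_up_process()es the sleeper so the loop re-checks the seqno.
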
 
 /**
@@ -1096,7 +1134,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
 
        return __wait_seqno(ring, seqno,
                            atomic_read(&dev_priv->gpu_error.reset_counter),
-                           interruptible, NULL);
+                           interruptible, NULL, NULL);
 }
 
 static int
@@ -1146,6 +1184,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
  */
 static __must_check int
 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+                                           struct drm_file *file,
                                            bool readonly)
 {
        struct drm_device *dev = obj->base.dev;
@@ -1172,7 +1211,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        mutex_unlock(&dev->struct_mutex);
-       ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+       ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
        mutex_lock(&dev->struct_mutex);
        if (ret)
                return ret;
@@ -1221,7 +1260,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
         * We will repeat the flush holding the lock in the normal manner
         * to catch cases where we are gazumped.
         */
-       ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+       ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
        if (ret)
                goto unref;
 
@@ -1917,7 +1956,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        return 0;
 }
 
-void
+static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                               struct intel_ring_buffer *ring)
 {
@@ -1956,6 +1995,13 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
        }
 }
 
+void i915_vma_move_to_active(struct i915_vma *vma,
+                            struct intel_ring_buffer *ring)
+{
+       list_move_tail(&vma->mm_list, &vma->vm->active_list);
+       return i915_gem_object_move_to_active(vma->obj, ring);
+}
+
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
@@ -2135,6 +2181,7 @@ int __i915_add_request(struct intel_ring_buffer *ring,
                i915_queue_hangcheck(ring->dev);
 
                if (was_empty) {
+                       cancel_delayed_work_sync(&dev_priv->mm.idle_work);
                        queue_delayed_work(dev_priv->wq,
                                           &dev_priv->mm.retire_work,
                                           round_jiffies_up_relative(HZ));
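
The cancel_delayed_work_sync() added here handles the idle-to-busy
edge: a new request must synchronously kill any pending idle
notification before the retire tick is armed, otherwise
intel_mark_idle() could run just after the GPU became busy again. In
sketch form (assuming the mm.idle_work field this patch introduces):

    static void mark_busy_edge(struct drm_i915_private *dev_priv)
    {
            /* _sync: also wait out a handler that already started */
            cancel_delayed_work_sync(&dev_priv->mm.idle_work);

            queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
                               round_jiffies_up_relative(HZ));
    }
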
@@ -2156,10 +2203,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
                return;
 
        spin_lock(&file_priv->mm.lock);
-       if (request->file_priv) {
-               list_del(&request->client_list);
-               request->file_priv = NULL;
-       }
+       list_del(&request->client_list);
+       request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
 }
 
@@ -2423,57 +2468,53 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
        WARN_ON(i915_verify_lists(ring->dev));
 }
 
-void
+bool
 i915_gem_retire_requests(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
+       bool idle = true;
        int i;
 
-       for_each_ring(ring, dev_priv, i)
+       for_each_ring(ring, dev_priv, i) {
                i915_gem_retire_requests_ring(ring);
+               idle &= list_empty(&ring->request_list);
+       }
+
+       if (idle)
+               mod_delayed_work(dev_priv->wq,
+                                  &dev_priv->mm.idle_work,
+                                  msecs_to_jiffies(100));
+
+       return idle;
 }
 
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
-       drm_i915_private_t *dev_priv;
-       struct drm_device *dev;
-       struct intel_ring_buffer *ring;
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv), mm.retire_work.work);
+       struct drm_device *dev = dev_priv->dev;
        bool idle;
-       int i;
-
-       dev_priv = container_of(work, drm_i915_private_t,
-                               mm.retire_work.work);
-       dev = dev_priv->dev;
 
        /* Come back later if the device is busy... */
-       if (!mutex_trylock(&dev->struct_mutex)) {
-               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
-                                  round_jiffies_up_relative(HZ));
-               return;
-       }
-
-       i915_gem_retire_requests(dev);
-
-       /* Send a periodic flush down the ring so we don't hold onto GEM
-        * objects indefinitely.
-        */
-       idle = true;
-       for_each_ring(ring, dev_priv, i) {
-               if (ring->gpu_caches_dirty)
-                       i915_add_request(ring, NULL);
-
-               idle &= list_empty(&ring->request_list);
+       idle = false;
+       if (mutex_trylock(&dev->struct_mutex)) {
+               idle = i915_gem_retire_requests(dev);
+               mutex_unlock(&dev->struct_mutex);
        }
-
-       if (!dev_priv->ums.mm_suspended && !idle)
+       if (!idle)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
                                   round_jiffies_up_relative(HZ));
-       if (idle)
-               intel_mark_idle(dev);
+}
 
-       mutex_unlock(&dev->struct_mutex);
+static void
+i915_gem_idle_work_handler(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv), mm.idle_work.work);
+
+       intel_mark_idle(dev_priv->dev);
 }
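
Taken together, the two handlers split the old retire work into a
periodic tick that re-arms itself while requests are outstanding and a
debounced idle notification: mod_delayed_work(), unlike
queue_delayed_work(), re-arms an already-pending timer, so
intel_mark_idle() only runs once the GPU has stayed idle for a full
100ms window. A simplified, hypothetical model of the scheme:

    #include <linux/workqueue.h>

    struct busy_tracker {
            struct delayed_work retire_work;   /* periodic tick while busy */
            struct delayed_work idle_work;     /* debounced idle callback  */
    };

    /* returns true once all outstanding requests have been retired */
    static bool retire_requests(struct busy_tracker *t);

    static void retire_tick(struct work_struct *work)
    {
            struct busy_tracker *t =
                    container_of(work, struct busy_tracker, retire_work.work);

            if (retire_requests(t))
                    /* idle: fire the callback if we stay idle for 100ms */
                    mod_delayed_work(system_wq, &t->idle_work,
                                     msecs_to_jiffies(100));
            else
                    /* still busy: check again in roughly a second */
                    schedule_delayed_work(&t->retire_work,
                                          round_jiffies_up_relative(HZ));
    }
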
 
 /**
@@ -2571,7 +2612,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        mutex_unlock(&dev->struct_mutex);
 
-       ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
+       ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
        if (timeout)
                args->timeout_ns = timespec_to_ns(timeout);
        return ret;
@@ -2618,6 +2659,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
+       trace_i915_gem_ring_sync_to(from, to, seqno);
        ret = to->sync_to(to, from, seqno);
        if (!ret)
                /* We use last_read_seqno because sync_to()
@@ -3410,8 +3452,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 
        /* And bump the LRU for this access */
        if (i915_gem_object_is_inactive(obj)) {
-               struct i915_vma *vma = i915_gem_obj_to_vma(obj,
-                                                          &dev_priv->gtt.base);
+               struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
                if (vma)
                        list_move_tail(&vma->mm_list,
                                       &dev_priv->gtt.base.inactive_list);
@@ -3782,7 +3823,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        if (seqno == 0)
                return 0;
 
-       ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+       ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
@@ -4225,16 +4266,13 @@ i915_gem_idle(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
 
-       if (dev_priv->ums.mm_suspended) {
-               mutex_unlock(&dev->struct_mutex);
+       if (dev_priv->ums.mm_suspended)
                return 0;
-       }
 
        ret = i915_gpu_idle(dev);
-       if (ret) {
-               mutex_unlock(&dev->struct_mutex);
+       if (ret)
                return ret;
-       }
+
        i915_gem_retire_requests(dev);
 
        /* Under UMS, be paranoid and evict. */
@@ -4248,6 +4286,7 @@ i915_gem_idle(struct drm_device *dev)
 
        /* Cancel the retire work handler, which should be idle now. */
        cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+       cancel_delayed_work_sync(&dev_priv->mm.idle_work);
 
        return 0;
 }
@@ -4581,6 +4620,8 @@ i915_gem_load(struct drm_device *dev)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
+       INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
+                         i915_gem_idle_work_handler);
        init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
        /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
@@ -4631,7 +4672,7 @@ static int i915_gem_init_phys_object(struct drm_device *dev,
        if (dev_priv->mm.phys_objs[id - 1] || !size)
                return 0;
 
-       phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
+       phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
        if (!phys_obj)
                return -ENOMEM;
 
@@ -4805,6 +4846,8 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
 
+       cancel_delayed_work_sync(&file_priv->mm.idle_work);
+
        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
@@ -4822,6 +4865,38 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
        spin_unlock(&file_priv->mm.lock);
 }
 
+static void
+i915_gem_file_idle_work_handler(struct work_struct *work)
+{
+       struct drm_i915_file_private *file_priv =
+               container_of(work, typeof(*file_priv), mm.idle_work.work);
+
+       atomic_set(&file_priv->rps_wait_boost, false);
+}
+
+int i915_gem_open(struct drm_device *dev, struct drm_file *file)
+{
+       struct drm_i915_file_private *file_priv;
+
+       DRM_DEBUG_DRIVER("\n");
+
+       file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+       if (!file_priv)
+               return -ENOMEM;
+
+       file->driver_priv = file_priv;
+       file_priv->dev_priv = dev->dev_private;
+
+       spin_lock_init(&file_priv->mm.lock);
+       INIT_LIST_HEAD(&file_priv->mm.request_list);
+       INIT_DELAYED_WORK(&file_priv->mm.idle_work,
+                         i915_gem_file_idle_work_handler);
+
+       idr_init(&file_priv->context_idr);
+
+       return 0;
+}
+
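
A hedged walk-through of the per-client state initialised here, using
the helpers from this patch (illustrative only; note can_wait_boost()
has the xchg side effect, which is exactly what this exercises):

    static void example_client_lifecycle(struct drm_i915_file_private *file_priv)
    {
            /* open(): kzalloc() left rps_wait_boost false, boost available */

            /* the first stalled wait claims the single boost ... */
            WARN_ON(!can_wait_boost(file_priv));

            /* ... later stalls are refused ... */
            WARN_ON(can_wait_boost(file_priv));

            /* ... until mm.idle_work runs ~100ms after the last wait
             * (i915_gem_file_idle_work_handler above) and re-arms it. */
            atomic_set(&file_priv->rps_wait_boost, false);

            WARN_ON(!can_wait_boost(file_priv));
    }
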
 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 {
        if (!mutex_is_locked(mutex))
@@ -4968,3 +5043,17 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
                mutex_unlock(&dev->struct_mutex);
        return freed;
 }
+
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
+{
+       struct i915_vma *vma;
+
+       if (WARN_ON(list_empty(&obj->vma_list)))
+               return NULL;
+
+       vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
+       if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
+               return NULL;
+
+       return vma;
+}
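
The new i915_gem_obj_to_ggtt() encodes an invariant rather than doing a
lookup: an object's global-GTT binding, when it exists, always sits at
the head of obj->vma_list, and the WARN_ONs fire if that ever stops
holding. Callers still have to cope with NULL, as the LRU-bump hunk in
i915_gem_object_set_to_gtt_domain() above does; a hedged usage sketch:

    static void bump_ggtt_lru(struct drm_i915_gem_object *obj,
                              struct list_head *inactive_list)
    {
            struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);

            if (vma)        /* NULL if not bound into the global GTT */
                    list_move_tail(&vma->mm_list, inactive_list);
    }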