drm/ttm: Add a bo list reserve fastpath (v2)
author     Dave Airlie <airlied@redhat.com>    Mon, 22 Nov 2010 03:24:40 +0000 (13:24 +1000)
committer  Dave Airlie <airlied@redhat.com>    Mon, 22 Nov 2010 03:24:40 +0000 (13:24 +1000)
Makes it possible to reserve a list of buffer objects with a single
spin lock / unlock if there is no contention.
Should improve cpu usage on SMP kernels.

v2: Initialize private list members on reserve and don't call
ttm_bo_list_ref_sub() with zero put_count.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_execbuf_util.c
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_execbuf_util.h
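
For orientation, here is a driver-side sketch of how the new fastpath is intended to be used. It is not part of this patch; my_driver_reserve_bos(), bos[] and val_seq are made-up names, and the include lines are only indicative of how a drm driver would pull in the TTM headers.

        #include <linux/list.h>
        #include "ttm/ttm_bo_api.h"
        #include "ttm/ttm_execbuf_util.h"

        /* Hypothetical helper: reserve nbufs buffer objects in one pass. */
        static int my_driver_reserve_bos(struct ttm_buffer_object **bos,
                                         struct ttm_validate_buffer *val_bufs,
                                         int nbufs, uint32_t val_seq)
        {
                struct list_head head;
                int i;

                INIT_LIST_HEAD(&head);
                for (i = 0; i < nbufs; i++) {
                        val_bufs[i].bo = bos[i];               /* refcounted bo pointer */
                        val_bufs[i].new_sync_obj_arg = NULL;   /* driver specific */
                        list_add_tail(&val_bufs[i].head, &head);
                }

                /*
                 * With no contention the whole list is reserved under a single
                 * lru_lock lock/unlock.  The private reserved/removed/put_count
                 * members are initialized by the call itself (the v2 change),
                 * and on error nothing is left reserved.
                 */
                return ttm_eu_reserve_buffers(&head, val_seq);
        }

On success the driver later hands the same list to ttm_eu_fence_buffer_objects() or unwinds it with ttm_eu_backoff_reservation(), just as before this patch.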

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 148a322d8f5d7c45d0487351635f89eb9a9a0949..a586378b1b2b420110bebfc3f74c13d278d07e70 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -169,7 +169,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 }
 EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
-static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;
@@ -191,11 +191,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
        }
 }
 
-/**
- * Call with the lru_lock held.
- */
-
-static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
        int put_count = 0;
 
@@ -267,6 +263,15 @@ static void ttm_bo_ref_bug(struct kref *list_kref)
        BUG();
 }
 
+void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+                        bool never_free)
+{
+       while (count--)
+               kref_put(&bo->list_kref,
+                        (never_free || (count >= 0)) ? ttm_bo_ref_bug :
+                        ttm_bo_release_list);
+}
+
 int ttm_bo_reserve(struct ttm_buffer_object *bo,
                   bool interruptible,
                   bool no_wait, bool use_sequence, uint32_t sequence)
@@ -282,8 +287,7 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
                put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);
 
-       while (put_count--)
-               kref_put(&bo->list_kref, ttm_bo_ref_bug);
+       ttm_bo_list_ref_sub(bo, put_count, true);
 
        return ret;
 }
@@ -496,8 +500,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                spin_unlock(&glob->lru_lock);
                ttm_bo_cleanup_memtype_use(bo);
 
-               while (put_count--)
-                       kref_put(&bo->list_kref, ttm_bo_ref_bug);
+               ttm_bo_list_ref_sub(bo, put_count, true);
 
                return;
        } else {
@@ -580,8 +583,7 @@ retry:
        spin_unlock(&glob->lru_lock);
        ttm_bo_cleanup_memtype_use(bo);
 
-       while (put_count--)
-               kref_put(&bo->list_kref, ttm_bo_ref_bug);
+       ttm_bo_list_ref_sub(bo, put_count, true);
 
        return 0;
 }
@@ -802,8 +804,7 @@ retry:
 
        BUG_ON(ret != 0);
 
-       while (put_count--)
-               kref_put(&bo->list_kref, ttm_bo_ref_bug);
+       ttm_bo_list_ref_sub(bo, put_count, true);
 
        ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
        ttm_bo_unreserve(bo);
@@ -1783,8 +1784,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);
 
-       while (put_count--)
-               kref_put(&bo->list_kref, ttm_bo_ref_bug);
+       ttm_bo_list_ref_sub(bo, put_count, true);
 
        /**
         * Wait for GPU, then move to system cached.
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c285c2902d15f23780b726bd29b0e351147cfdca..201a71d111ec334726baa67758d09726ab019570 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
 #include <linux/sched.h>
 #include <linux/module.h>
 
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+{
+       struct ttm_validate_buffer *entry;
+
+       list_for_each_entry(entry, list, head) {
+               struct ttm_buffer_object *bo = entry->bo;
+               if (!entry->reserved)
+                       continue;
+
+               if (entry->removed) {
+                       ttm_bo_add_to_lru(bo);
+                       entry->removed = false;
+
+               }
+               entry->reserved = false;
+               atomic_set(&bo->reserved, 0);
+               wake_up_all(&bo->event_queue);
+       }
+}
+
+static void ttm_eu_del_from_lru_locked(struct list_head *list)
+{
+       struct ttm_validate_buffer *entry;
+
+       list_for_each_entry(entry, list, head) {
+               struct ttm_buffer_object *bo = entry->bo;
+               if (!entry->reserved)
+                       continue;
+
+               if (!entry->removed) {
+                       entry->put_count = ttm_bo_del_from_lru(bo);
+                       entry->removed = true;
+               }
+       }
+}
+
+static void ttm_eu_list_ref_sub(struct list_head *list)
+{
+       struct ttm_validate_buffer *entry;
+
+       list_for_each_entry(entry, list, head) {
+               struct ttm_buffer_object *bo = entry->bo;
+
+               if (entry->put_count) {
+                       ttm_bo_list_ref_sub(bo, entry->put_count, true);
+                       entry->put_count = 0;
+               }
+       }
+}
+
+static int ttm_eu_wait_unreserved_locked(struct list_head *list,
+                                        struct ttm_buffer_object *bo)
+{
+       struct ttm_bo_global *glob = bo->glob;
+       int ret;
+
+       ttm_eu_del_from_lru_locked(list);
+       spin_unlock(&glob->lru_lock);
+       ret = ttm_bo_wait_unreserved(bo, true);
+       spin_lock(&glob->lru_lock);
+       if (unlikely(ret != 0))
+               ttm_eu_backoff_reservation_locked(list);
+       return ret;
+}
+
+
 void ttm_eu_backoff_reservation(struct list_head *list)
 {
        struct ttm_validate_buffer *entry;
@@ -61,35 +127,71 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
 int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
 {
+       struct ttm_bo_global *glob;
        struct ttm_validate_buffer *entry;
        int ret;
 
+       if (list_empty(list))
+               return 0;
+
+       list_for_each_entry(entry, list, head) {
+               entry->reserved = false;
+               entry->put_count = 0;
+               entry->removed = false;
+       }
+
+       entry = list_first_entry(list, struct ttm_validate_buffer, head);
+       glob = entry->bo->glob;
+
 retry:
+       spin_lock(&glob->lru_lock);
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
 
-               entry->reserved = false;
-               ret = ttm_bo_reserve(bo, true, false, true, val_seq);
-               if (ret != 0) {
-                       ttm_eu_backoff_reservation(list);
-                       if (ret == -EAGAIN) {
-                               ret = ttm_bo_wait_unreserved(bo, true);
-                               if (unlikely(ret != 0))
-                                       return ret;
-                               goto retry;
-                       } else
+retry_this_bo:
+               ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+               switch (ret) {
+               case 0:
+                       break;
+               case -EBUSY:
+                       ret = ttm_eu_wait_unreserved_locked(list, bo);
+                       if (unlikely(ret != 0)) {
+                               spin_unlock(&glob->lru_lock);
+                               ttm_eu_list_ref_sub(list);
+                               return ret;
+                       }
+                       goto retry_this_bo;
+               case -EAGAIN:
+                       ttm_eu_backoff_reservation_locked(list);
+                       spin_unlock(&glob->lru_lock);
+                       ttm_eu_list_ref_sub(list);
+                       ret = ttm_bo_wait_unreserved(bo, true);
+                       if (unlikely(ret != 0))
                                return ret;
+                       goto retry;
+               default:
+                       ttm_eu_backoff_reservation_locked(list);
+                       spin_unlock(&glob->lru_lock);
+                       ttm_eu_list_ref_sub(list);
+                       return ret;
                }
 
                entry->reserved = true;
                if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-                       ttm_eu_backoff_reservation(list);
+                       ttm_eu_backoff_reservation_locked(list);
+                       spin_unlock(&glob->lru_lock);
+                       ttm_eu_list_ref_sub(list);
                        ret = ttm_bo_wait_cpu(bo, false);
                        if (ret)
                                return ret;
                        goto retry;
                }
        }
+
+       ttm_eu_del_from_lru_locked(list);
+       spin_unlock(&glob->lru_lock);
+       ttm_eu_list_ref_sub(list);
+
        return 0;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index beafc156a5353259d6b077ca57b2dd13af39f48d..b0fc9c12554be0cd8e443b55b4761654960deae0 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -364,6 +364,44 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo,
  */
 extern void ttm_bo_unref(struct ttm_buffer_object **bo);
 
+
+/**
+ * ttm_bo_list_ref_sub
+ *
+ * @bo: The buffer object.
+ * @count: The number of references with which to decrease @bo::list_kref;
+ * @never_free: The refcount should not reach zero with this operation.
+ *
+ * Release @count lru list references to this buffer object.
+ */
+extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+                               bool never_free);
+
+/**
+ * ttm_bo_add_to_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Add this bo to the relevant mem type lru and, if it's backed by
+ * system pages (ttms) to the swap list.
+ * This function must be called with struct ttm_bo_global::lru_lock held, and
+ * is typically called immediately prior to unreserving a bo.
+ */
+extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_del_from_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Remove this bo from all lru lists used to lookup and reserve an object.
+ * This function must be called with struct ttm_bo_global::lru_lock held,
+ * and is usually called just immediately after the bo has been reserved to
+ * avoid recursive reservation from lru lists.
+ */
+extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
+
+
 /**
  * ttm_bo_lock_delayed_workqueue
  *
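
To make the lru_lock rules above concrete, the calling sequence this patch uses throughout ttm_bo.c and ttm_execbuf_util.c boils down to the following sketch (condensed, not a verbatim excerpt; 'bo' is assumed to already be reserved):

        struct ttm_bo_global *glob = bo->glob;
        int put_count;

        spin_lock(&glob->lru_lock);
        put_count = ttm_bo_del_from_lru(bo);    /* lru_lock must be held */
        spin_unlock(&glob->lru_lock);

        /* Drop the lru list references outside the spinlock. */
        ttm_bo_list_ref_sub(bo, put_count, true);

        /* ... use the reserved buffer ... */

        spin_lock(&glob->lru_lock);
        ttm_bo_add_to_lru(bo);                  /* typically right before unreserving */
        spin_unlock(&glob->lru_lock);
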
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 8e0c848326b6df5ccc95fc4ae16b965d81bfa0c8..95068e6024db75773af18ff563f80c77e38033e3 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -864,6 +864,20 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence);
 
+
+/**
+ * ttm_bo_reserve_locked:
+ *
+ * Similar to ttm_bo_reserve, but must be called with the glob::lru_lock
+ * spinlock held, and will not remove reserved buffers from the lru lists.
+ * The function may release the LRU spinlock if it needs to sleep.
+ */
+
+extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+                                bool interruptible,
+                                bool no_wait, bool use_sequence,
+                                uint32_t sequence);
+
 /**
  * ttm_bo_unreserve
  *
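
A rough sketch of how a single buffer could be reserved under this contract ('seq' is a placeholder sequence number, not something defined in this patch); ttm_eu_reserve_buffers() above performs the same steps across a whole list, with no_wait and backoff handling on top:

        struct ttm_bo_global *glob = bo->glob;
        int put_count = 0, ret;

        spin_lock(&glob->lru_lock);
        /* May drop and re-acquire lru_lock internally if it has to sleep. */
        ret = ttm_bo_reserve_locked(bo, true, false, true, seq);
        if (likely(ret == 0))
                /* The _locked variant leaves LRU removal to the caller. */
                put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        if (likely(ret == 0))
                ttm_bo_list_ref_sub(bo, put_count, true);
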
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index cd2c475da9eae790734d01eef7eff8c7f991263f..fd09b8438977c2eb40981bc220e044b40d038d35 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -41,7 +41,9 @@
  * @bo:             refcounted buffer object pointer.
  * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
  * adding a new sync object.
- * @reservied:      Indicates whether @bo has been reserved for validation.
+ * @reserved:       Indicates whether @bo has been reserved for validation.
+ * @removed:        Indicates whether @bo has been removed from lru lists.
+ * @put_count:      Number of outstanding references on bo::list_kref.
  */
 
 struct ttm_validate_buffer {
@@ -49,6 +51,8 @@ struct ttm_validate_buffer {
        struct ttm_buffer_object *bo;
        void *new_sync_obj_arg;
        bool reserved;
+       bool removed;
+       int put_count;
 };
 
 /**