diff --git a/lib/rwsem.c b/lib/rwsem.c
index 917fd946b4951147ba2971fe86c3958b33d63434..f236d7cd5cf3e34bab6955613a773aeda8914039 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -36,6 +36,14 @@ struct rwsem_waiter {
 #define RWSEM_WAITING_FOR_WRITE        0x00000002
 };
 
+/* Wake types for __rwsem_do_wake().  Note that RWSEM_WAKE_NO_ACTIVE and
+ * RWSEM_WAKE_READ_OWNED imply that the spinlock must have been kept held
+ * since the rwsem value was observed.
+ */
+#define RWSEM_WAKE_ANY        0 /* Wake whatever's at head of wait list */
+#define RWSEM_WAKE_NO_ACTIVE  1 /* rwsem was observed with no active thread */
+#define RWSEM_WAKE_READ_OWNED 2 /* rwsem was observed to be read owned */
+
 /*
  * handle the lock release when processes blocked on it can now run
  * - if we come here from up_xxxx(), then:
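
For reference, a sketch of the count layout these wake types reason about. The values below are the classic 32-bit x86 definitions from asm/rwsem.h (RWSEM_ACTIVE_READ_BIAS and RWSEM_ACTIVE_WRITE_BIAS are introduced by this same patch series); other architectures use the same scheme with wider fields, so treat the exact numbers as illustrative:

    #define RWSEM_UNLOCKED_VALUE    0x00000000
    #define RWSEM_ACTIVE_BIAS       0x00000001    /* one active reader or writer */
    #define RWSEM_ACTIVE_MASK       0x0000ffff    /* low bits hold the active count */
    #define RWSEM_WAITING_BIAS      (-0x00010000) /* applied once while the wait
                                                   * list is non-empty (the
                                                   * convention this patch adopts) */
    #define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
    #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

With this encoding, count == RWSEM_WAITING_BIAS means waiters are queued but nobody is active (the RWSEM_WAKE_NO_ACTIVE case), while count > RWSEM_WAITING_BIAS with a non-empty wait list means the active holders can only be readers (the RWSEM_WAKE_READ_OWNED case).
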
@@ -46,28 +54,34 @@ struct rwsem_waiter {
  * - woken process blocks are discarded from the list after having task zeroed
  * - writers are only woken if downgrading is false
  */
-static inline struct rw_semaphore *
-__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
+static struct rw_semaphore *
+__rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 {
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        struct list_head *next;
-       signed long oldcount, woken, loop;
+       signed long oldcount, woken, loop, adjustment;
 
        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
                goto readers_only;
 
-       if (downgrading)
+       if (wake_type == RWSEM_WAKE_READ_OWNED)
+               /* Another active reader was observed, so wakeup is not
+                * likely to succeed. Save the atomic op.
+                */
                goto out;
 
        /* There's a writer at the front of the queue - try to grant it the
         * write lock.  However, we only wake this writer if we can transition
         * the active part of the count from 0 -> 1
         */
+       adjustment = RWSEM_ACTIVE_WRITE_BIAS;
+       if (waiter->list.next == &sem->wait_list)
+               adjustment -= RWSEM_WAITING_BIAS;
+
  try_again_write:
-       oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
-                                               - RWSEM_ACTIVE_BIAS;
+       oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
        if (oldcount & RWSEM_ACTIVE_MASK)
                /* Someone grabbed the sem already */
                goto undo_write;
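
To make the new adjustment concrete, here is the writer-wakeup arithmetic as a standalone worked example, assuming the illustrative 32-bit bias values sketched earlier (not kernel code):

    #include <assert.h>

    #define RWSEM_ACTIVE_BIAS        0x00000001L
    #define RWSEM_WAITING_BIAS      (-0x00010000L)
    #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

    int main(void)
    {
            long count = RWSEM_WAITING_BIAS; /* writer queued, nobody active */

            /* Writer is the last waiter: drop the waiting bias as well. */
            long adjustment = RWSEM_ACTIVE_WRITE_BIAS - RWSEM_WAITING_BIAS;
            assert(count + adjustment == RWSEM_ACTIVE_WRITE_BIAS);
            /* -0x10000 + 0x1 == -0xffff: write locked, no waiters left */

            /* More waiters behind the writer: keep the waiting bias. */
            adjustment = RWSEM_ACTIVE_WRITE_BIAS;
            assert(count + adjustment ==
                   RWSEM_WAITING_BIAS + RWSEM_ACTIVE_WRITE_BIAS);
            /* write locked, waiters still queued */
            return 0;
    }

Either way a single rwsem_atomic_update() both claims the write lock and fixes up the waiting bias, where the old code always added plain RWSEM_ACTIVE_BIAS.
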
@@ -85,19 +99,25 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
        goto out;
 
  readers_only:
-       if (downgrading)
-               goto wake_readers;
-
-       /* if we came through an up_xxxx() call, we only wake someone up
-        * if we can transition the active part of the count from 0 -> 1 */
- try_again_read:
-       oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
-                                               - RWSEM_ACTIVE_BIAS;
-       if (oldcount & RWSEM_ACTIVE_MASK)
-               /* Someone grabbed the sem already */
-               goto undo_read;
+       /* If we come here from up_xxxx(), another thread might have reached
+        * rwsem_down_failed_common() before we acquired the spinlock and
+        * woken up a waiter, making it now active.  We prefer to check for
+        * this first in order to not spend too much time with the spinlock
+        * held if we're not going to be able to wake up readers in the end.
+        *
+        * Note that we do not need to update the rwsem count: any writer
+        * trying to acquire rwsem will run rwsem_down_write_failed() due
+        * to the waiting threads and block trying to acquire the spinlock.
+        *
+        * We use a dummy atomic update in order to acquire the cache line
+        * exclusively since we expect to succeed and run the final rwsem
+        * count adjustment pretty soon.
+        */
+       if (wake_type == RWSEM_WAKE_ANY &&
+           rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS)
+               /* Someone grabbed the sem for write already */
+               goto out;
 
- wake_readers:
        /* Grant an infinite number of read locks to the readers at the front
         * of the queue.  Note we increment the 'active part' of the count by
         * the number of readers before waking any processes up.
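
A sketch of why the comparison in the hunk above works, reusing the illustrative bias macros from earlier (hypothetical helper, not part of the patch): with the waiting bias already applied, readers only add small positive RWSEM_ACTIVE_READ_BIAS units on top of it, while an active writer contributes a second large negative chunk:

    /* count observed while sem->wait_lock is held and waiters are queued */
    static int writer_active(long count)
    {
            /* k active readers: count == RWSEM_WAITING_BIAS + k   (k >= 0)
             * active writer:    count == RWSEM_WAITING_BIAS
             *                            + RWSEM_ACTIVE_WRITE_BIAS
             *                         == -0x1ffff < RWSEM_WAITING_BIAS
             */
            return count < RWSEM_WAITING_BIAS;
    }

The add of 0 in rwsem_atomic_update(0, sem) changes nothing, but it typically pulls the count's cache line in exclusive state, which pays off when the real count adjustment follows shortly after.
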
@@ -114,16 +134,15 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 
        } while (waiter->flags & RWSEM_WAITING_FOR_READ);
 
-       loop = woken;
-       woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
-       if (!downgrading)
-               /* we'd already done one increment earlier */
-               woken -= RWSEM_ACTIVE_BIAS;
+       adjustment = woken * RWSEM_ACTIVE_READ_BIAS;
+       if (waiter->flags & RWSEM_WAITING_FOR_READ)
+               /* hit end of list above */
+               adjustment -= RWSEM_WAITING_BIAS;
 
-       rwsem_atomic_add(woken, sem);
+       rwsem_atomic_add(adjustment, sem);
 
        next = sem->wait_list.next;
-       for (; loop > 0; loop--) {
+       for (loop = woken; loop > 0; loop--) {
                waiter = list_entry(next, struct rwsem_waiter, list);
                next = waiter->list.next;
                tsk = waiter->task;
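
The batched reader wakeup as a worked example with the illustrative biases (hypothetical function, reusing the macros and assert.h from the earlier sketches):

    static void wake_three_readers_example(void)
    {
            long count = RWSEM_WAITING_BIAS; /* 3 readers queued, none active */
            long adjustment;

            /* Case 1: hit the end of the list, no writer behind the readers,
             * so the waiting bias is removed along with granting the locks. */
            adjustment = 3 * RWSEM_ACTIVE_READ_BIAS - RWSEM_WAITING_BIAS;
            assert(count + adjustment == 3); /* 3 active readers, empty list */

            /* Case 2: a writer is still queued behind them. */
            adjustment = 3 * RWSEM_ACTIVE_READ_BIAS;
            assert(count + adjustment == RWSEM_WAITING_BIAS + 3);
            /* readers hold the lock, the writer keeps waiting */
    }

Note the whole batch now costs one rwsem_atomic_add(), where the old code paid an extra atomic update up front (try_again_read) and then corrected for it.
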
@@ -142,13 +161,9 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
        /* undo the change to the active count, but check for a transition
         * 1->0 */
  undo_write:
-       if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
+       if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK)
                goto out;
        goto try_again_write;
- undo_read:
-       if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
-               goto out;
-       goto try_again_read;
 }
 
 /*
@@ -156,8 +171,9 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
  */
 static struct rw_semaphore __sched *
 rwsem_down_failed_common(struct rw_semaphore *sem,
-                       struct rwsem_waiter *waiter, signed long adjustment)
+                        unsigned int flags, signed long adjustment)
 {
+       struct rwsem_waiter waiter;
        struct task_struct *tsk = current;
        signed long count;
 
@@ -165,23 +181,34 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 
        /* set up my own style of waitqueue */
        spin_lock_irq(&sem->wait_lock);
-       waiter->task = tsk;
+       waiter.task = tsk;
+       waiter.flags = flags;
        get_task_struct(tsk);
 
-       list_add_tail(&waiter->list, &sem->wait_list);
+       if (list_empty(&sem->wait_list))
+               adjustment += RWSEM_WAITING_BIAS;
+       list_add_tail(&waiter.list, &sem->wait_list);
 
-       /* we're now waiting on the lock, but no longer actively read-locking */
+       /* we're now waiting on the lock, but no longer actively locking */
        count = rwsem_atomic_update(adjustment, sem);
 
-       /* if there are no active locks, wake the front queued process(es) up */
-       if (!(count & RWSEM_ACTIVE_MASK))
-               sem = __rwsem_do_wake(sem, 0);
+       /* If there are no active locks, wake the front queued process(es) up.
+        *
+        * Alternatively, if we're called from a failed down_write() and
+        * there were already threads queued before us, and there are no
+        * active writers, the lock must be read owned; so we try to wake
+        * any read locks that were queued ahead of us. */
+       if (count == RWSEM_WAITING_BIAS)
+               sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);
+       else if (count > RWSEM_WAITING_BIAS &&
+                adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
+               sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
 
        spin_unlock_irq(&sem->wait_lock);
 
        /* wait to be given the lock */
        for (;;) {
-               if (!waiter->task)
+               if (!waiter.task)
                        break;
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
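
Spelled out with the illustrative biases, the wake decision above amounts to the following (hypothetical helper mirroring the two conditions; 'adjustment' is what this thread just applied):

    static int wake_type_for(long count, long adjustment)
    {
            if (count == RWSEM_WAITING_BIAS)    /* -0x10000: nobody active */
                    return RWSEM_WAKE_NO_ACTIVE;
            if (count > RWSEM_WAITING_BIAS &&   /* active holders are readers */
                adjustment == -RWSEM_ACTIVE_WRITE_BIAS) /* and we are a writer */
                    return RWSEM_WAKE_READ_OWNED;
            return -1;                          /* leave the queue alone */
    }

A count below RWSEM_WAITING_BIAS would mean an active writer, and a failed reader (adjustment == -RWSEM_ACTIVE_READ_BIAS) has nothing to do here: any readers queued ahead of it are normally blocked behind a writer, which RWSEM_WAKE_READ_OWNED would skip anyway.
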
@@ -198,12 +225,8 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 asmregparm struct rw_semaphore __sched *
 rwsem_down_read_failed(struct rw_semaphore *sem)
 {
-       struct rwsem_waiter waiter;
-
-       waiter.flags = RWSEM_WAITING_FOR_READ;
-       rwsem_down_failed_common(sem, &waiter,
-                               RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
-       return sem;
+       return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
+                                       -RWSEM_ACTIVE_READ_BIAS);
 }
 
 /*
@@ -212,12 +235,8 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
 asmregparm struct rw_semaphore __sched *
 rwsem_down_write_failed(struct rw_semaphore *sem)
 {
-       struct rwsem_waiter waiter;
-
-       waiter.flags = RWSEM_WAITING_FOR_WRITE;
-       rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
-
-       return sem;
+       return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
+                                       -RWSEM_ACTIVE_WRITE_BIAS);
 }
 
 /*
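
For context, a simplified sketch of the fast paths that fall into these slow-path functions. The real versions live in each architecture's asm/rwsem.h, often as hand-written assembly, so the helpers below are illustrative only:

    static inline void down_read_sketch(struct rw_semaphore *sem)
    {
            /* Optimistically become an active reader; a negative result
             * means a writer is active or waiters are queued. */
            if (rwsem_atomic_update(RWSEM_ACTIVE_READ_BIAS, sem) < 0)
                    rwsem_down_read_failed(sem);
    }

    static inline void down_write_sketch(struct rw_semaphore *sem)
    {
            /* Succeeds only on an unlocked 0 -> RWSEM_ACTIVE_WRITE_BIAS
             * transition. */
            if (rwsem_atomic_update(RWSEM_ACTIVE_WRITE_BIAS, sem)
                != RWSEM_ACTIVE_WRITE_BIAS)
                    rwsem_down_write_failed(sem);
    }

This is why both slow paths start by subtracting the bias their fast path speculatively added: -RWSEM_ACTIVE_READ_BIAS and -RWSEM_ACTIVE_WRITE_BIAS convert "active locker" back into "queued waiter".
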
@@ -232,7 +251,7 @@ asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 
        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
-               sem = __rwsem_do_wake(sem, 0);
+               sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
 
        spin_unlock_irqrestore(&sem->wait_lock, flags);
 
@@ -252,7 +271,7 @@ asmregparm struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
 
        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
-               sem = __rwsem_do_wake(sem, 1);
+               sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
 
        spin_unlock_irqrestore(&sem->wait_lock, flags);
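
Finally, a hypothetical caller-side view of how rwsem_downgrade_wake() is reached (the exact downgrade_write() arithmetic is per-architecture; the values shown assume the illustrative biases):

    down_write(&sem);       /* count = RWSEM_ACTIVE_WRITE_BIAS */
    /* ... exclusive updates ... */
    downgrade_write(&sem);  /* count -= RWSEM_WAITING_BIAS: the write lock
                             * becomes a read lock; if waiters are queued,
                             * rwsem_downgrade_wake() runs with
                             * RWSEM_WAKE_READ_OWNED, waking queued readers
                             * but skipping a writer at the head, since the
                             * downgrading thread still holds a read lock */
    /* ... shared reads ... */
    up_read(&sem);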