/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
 *		    X = #active_readers + #readers attempting to lock
 *		    (X*ACTIVE_BIAS)
 *
 * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
 *		attempting to read lock or write lock.
 *
 * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
 *		    X = #active readers + #readers attempting lock
 *		    (X*ACTIVE_BIAS + WAITING_BIAS)
 *		(2) 1 writer attempting lock, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *		(3) 1 writer active, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
 *		    (WAITING_BIAS + ACTIVE_BIAS)
 *		(2) 1 writer active or attempting lock, no waiters for lock
 *		    (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000	(1) There are writers or readers queued but none active
 *		    or in the process of attempting lock.
 *		    (WAITING_BIAS)
 *		Note: a writer can attempt to steal the lock for this count by
 *		adding ACTIVE_WRITE_BIAS in a cmpxchg and checking the old count
 *
 * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
 *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
 *	 that the count becomes greater than 0 for successful lock acquisition,
 *	 i.e. the case where there are only readers or nobody has the lock
 *	 (1st and 2nd case above).
 *
 *	 Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 *	 checking that the count becomes ACTIVE_WRITE_BIAS for successful lock
 *	 acquisition (i.e. nobody else has the lock or attempts the lock).  If
 *	 unsuccessful, in rwsem_down_write_failed, we'll check to see if there
 *	 are only waiters but none active (5th case above), and attempt to
 *	 steal the lock.
 */
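
/*
 * Worked example (illustrative only; uses the 32-bit values implied above,
 * i.e. ACTIVE_BIAS == 0x00000001, WAITING_BIAS == 0xffff0000 and
 * ACTIVE_WRITE_BIAS == WAITING_BIAS + ACTIVE_BIAS == 0xffff0001):
 *
 *	two active readers, nobody queued:
 *		2*ACTIVE_BIAS				= 0x00000002
 *	two active readers with a writer queued behind them:
 *		2*ACTIVE_BIAS + WAITING_BIAS		= 0xffff0002
 *	one active writer with waiters queued behind it:
 *		ACTIVE_WRITE_BIAS + WAITING_BIAS	= 0xfffe0001
 *	everyone has released but waiters are still queued:
 *		WAITING_BIAS				= 0xffff0000
 *
 * In that last state a waiting writer may try to steal the lock by
 * cmpxchg'ing the count from WAITING_BIAS to WAITING_BIAS + ACTIVE_WRITE_BIAS
 * (0xfffe0001, while other waiters remain queued), which succeeds only if no
 * other locker raced in first; see rwsem_down_write_failed() below.
 */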

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

EXPORT_SYMBOL(__init_rwsem);
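
/*
 * Usage sketch (illustrative only, not part of this file; "frob_sem" and
 * "frob_data" are made-up names): __init_rwsem() is not normally called
 * directly.  Callers either declare the semaphore statically or initialize
 * it through the init_rwsem() wrapper, then use down_read()/down_write():
 *
 *	static DECLARE_RWSEM(frob_sem);		(static initialization)
 *
 *	struct rw_semaphore frob_sem2;
 *	init_rwsem(&frob_sem2);			(runtime initialization)
 *
 *	down_read(&frob_sem);			(shared/reader lock)
 *	... read frob_data ...
 *	up_read(&frob_sem);
 *
 *	down_write(&frob_sem);			(exclusive/writer lock)
 *	... modify frob_data ...
 *	up_write(&frob_sem);
 */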

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 *   - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wake_type is RWSEM_WAKE_ANY
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	long oldcount, woken, loop, adjustment;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY)
			/* Wake writer at the front of the queue, but do not
			 * grant it the lock yet as we want other writers
			 * to be able to steal it. Readers, on the other hand,
			 * will block as they will notice the queued writer.
			 */
			wake_up_process(waiter->task);
		goto out;
	}

	/* Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	adjustment = 0;
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/* A writer stole the lock. Undo our reader grant. */
			if (rwsem_atomic_update(-adjustment, sem) &
						RWSEM_ACTIVE_MASK)
				goto out;
			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
	}

	/* Grant an infinite number of read locks to the readers at the front
	 * of the queue. Note we increment the 'active part' of the count by
	 * the number of readers before waking any processes up.
	 */
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
					struct rwsem_waiter, list);

	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;

	if (adjustment)
		rwsem_atomic_add(adjustment, sem);

	next = sem->wait_list.next;
	loop = woken;
	do {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	} while (--loop);

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	return sem;
}
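
/*
 * Illustrative trace of the reader-grant vs. writer-steal race handled in
 * __rwsem_do_wake() above (numbers follow the 32-bit guide at the top of
 * this file):
 *
 *	count == 0xffff0000: waiters queued, nothing active, front waiter
 *	is a reader, so we add ACTIVE_READ_BIAS expecting oldcount to be
 *	WAITING_BIAS (0xffff0000).
 *
 *	If a writer stole the lock first (cmpxchg'ing in ACTIVE_WRITE_BIAS),
 *	oldcount comes back as 0xfffe0001 < WAITING_BIAS, so we undo our
 *	grant and let the thief's eventual up_write() do the waking.  If the
 *	undo shows no active lockers remain (the thief already released), we
 *	retry via try_reader_grant.
 */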

/*
 * wait for the read lock to be granted
 */
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers!
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	while (true) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!waiter.task)
			break;
		schedule();
	}

	tsk->state = TASK_RUNNING;

	return sem;
}
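
/*
 * For context (illustrative sketch, not part of this file): the per-arch
 * fast path looks roughly like the asm-generic __down_read() below, so
 * rwsem_down_read_failed() is only entered when the increment leaves the
 * count non-positive, i.e. a writer is active or queued:
 *
 *	static inline void __down_read(struct rw_semaphore *sem)
 *	{
 *		if (unlikely(atomic_long_inc_return(
 *				(atomic_long_t *)&sem->count) <= 0))
 *			rwsem_down_read_failed(sem);
 *	}
 */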

/*
 * wait until we successfully acquire the write lock
 */
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
	long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* If there were already threads queued before us and there are no
	 * active writers, the lock must be read owned; so we try to wake
	 * any read locks that were queued ahead of us. */
	if (count > RWSEM_WAITING_BIAS &&
	    adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);

	/* wait until we successfully acquire the lock */
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	while (true) {
		if (!(count & RWSEM_ACTIVE_MASK)) {
			/* Try acquiring the write lock. */
			count = RWSEM_ACTIVE_WRITE_BIAS;
			if (!list_is_singular(&sem->wait_list))
				count += RWSEM_WAITING_BIAS;

			if (sem->count == RWSEM_WAITING_BIAS &&
			    cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
							RWSEM_WAITING_BIAS)
				break;
		}

		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			schedule();
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}

	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);
	tsk->state = TASK_RUNNING;

	return sem;
}
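
/*
 * For context (illustrative sketch, not part of this file): the asm-generic
 * style write fast path adds ACTIVE_WRITE_BIAS and only drops into
 * rwsem_down_write_failed() when the result shows someone else already
 * holds or is attempting the lock:
 *
 *	static inline void __down_write(struct rw_semaphore *sem)
 *	{
 *		long tmp;
 *
 *		tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
 *					     (atomic_long_t *)&sem->count);
 *		if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
 *			rwsem_down_write_failed(sem);
 *	}
 */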

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);
	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}
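
/*
 * For context (illustrative sketch, not part of this file): the unlock fast
 * path looks roughly like the asm-generic __up_read() below, so rwsem_wake()
 * is only called when the decrement leaves waiters queued with no active
 * lockers left:
 *
 *	static inline void __up_read(struct rw_semaphore *sem)
 *	{
 *		long tmp;
 *
 *		tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
 *		if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
 *			rwsem_wake(sem);
 *	}
 */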

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);
	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}
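
/*
 * For context (illustrative sketch, not part of this file): downgrading
 * roughly follows the asm-generic __downgrade_write() below.  Adding
 * -RWSEM_WAITING_BIAS converts the caller's ACTIVE_WRITE_BIAS into a plain
 * ACTIVE_READ_BIAS, and a still-negative result means waiters are queued:
 *
 *	static inline void __downgrade_write(struct rw_semaphore *sem)
 *	{
 *		long tmp;
 *
 *		tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
 *					     (atomic_long_t *)&sem->count);
 *		if (tmp < 0)
 *			rwsem_downgrade_wake(sem);
 *	}
 */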

EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);