/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

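/*
 * The lock word holds __ARCH_SPIN_LOCK_UNLOCKED__ when the lock is free
 * and __ARCH_SPIN_LOCK_LOCKED__ while held (see asm/spinlock_types.h).
 * arch_spin_unlock_wait() only spins until the current holder releases;
 * it never takes the lock itself.
 */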
#define arch_spin_is_locked(x)  ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)       arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
        do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)

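/*
 * Acquire: atomically swap LOCKED into the lock word using the EX
 * (atomic exchange) instruction. If the value swapped out was already
 * LOCKED, another CPU holds the lock: keep exchanging until the old
 * value reads back as UNLOCKED.
 */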
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;

        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
        "       breq  %0, %2, 1b        \n"
        : "+&r" (tmp)
        : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
        : "memory");

        /*
         * ACQUIRE barrier: the critical section must not be observed
         * before the lock is seen as taken.
         */
        smp_mb();
}

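/*
 * Trylock: a single EX attempt; the lock was taken iff the old value
 * swapped out was UNLOCKED.
 */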
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;

        __asm__ __volatile__(
        "       ex  %0, [%1]            \n"
        : "+r" (tmp)
        : "r"(&(lock->slock))
        : "memory");

        smp_mb();

        return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
}

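/*
 * Release: a plain store of UNLOCKED is enough, since the EX in the
 * acquire path will observe it; the barriers keep the critical section
 * from leaking past the unlock.
 */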
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        /* RELEASE barrier: critical section must complete before unlock */
        smp_mb();

        lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

        smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * The rwlock state is kept in @counter and access to it is serialized
 * with @lock_mutex.
 *
 * Unfair locking: writers can be starved indefinitely by readers.
 */

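/*
 * Resulting encoding of @counter, assuming __ARCH_RW_LOCK_UNLOCKED__
 * is the maximum reader count (see asm/spinlock_types.h):
 *
 *      counter == __ARCH_RW_LOCK_UNLOCKED__            unlocked
 *      0 < counter < __ARCH_RW_LOCK_UNLOCKED__         held by reader(s)
 *      counter == 0                                    held by one writer
 */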
/* Would read_trylock() succeed? */
#define arch_read_can_lock(x)   ((x)->counter > 0)

/* Would write_trylock() succeed? */
#define arch_write_can_lock(x)  ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        int ret = 0;

        arch_spin_lock(&(rw->lock_mutex));

        /*
         * Zero means a writer holds the lock exclusively: deny the reader.
         * Otherwise grant the lock to the first/subsequent reader.
         */
        if (rw->counter > 0) {
                rw->counter--;
                ret = 1;
        }

        arch_spin_unlock(&(rw->lock_mutex));

        smp_mb();
        return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        int ret = 0;

        arch_spin_lock(&(rw->lock_mutex));

        /*
         * If the lock is currently held by reader(s) or a writer
         * (counter != __ARCH_RW_LOCK_UNLOCKED__), deny the writer.
         * Otherwise grant the lock to the writer.
         * Hence the claim that Linux rwlocks are unfair to writers
         * (they can be starved for an indefinite time by readers).
         */
        if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
                rw->counter = 0;
                ret = 1;
        }
        arch_spin_unlock(&(rw->lock_mutex));

        smp_mb();
        return ret;
}

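/*
 * The blocking lock variants simply spin on the trylock, calling
 * cpu_relax() between attempts.
 */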
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (!arch_read_trylock(rw))
                cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (!arch_write_trylock(rw))
                cpu_relax();
}

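/*
 * The unlock paths mirror the trylocks: read_unlock returns one reader
 * slot, write_unlock restores the full count; both are serialized by
 * @lock_mutex.
 */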
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter++;
        arch_spin_unlock(&(rw->lock_mutex));
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
        arch_spin_unlock(&(rw->lock_mutex));
}

#define arch_read_lock_flags(lock, flags)       arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)      arch_write_lock(lock)

#define arch_spin_relax(lock)   cpu_relax()
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

#endif /* __ASM_SPINLOCK_H */