/*
 * lib/lockref.c - lockless reference counting with a spinlock fallback
 * (from the Linux kernel source tree)
 */
1 #include <linux/export.h>
2 #include <linux/lockref.h>
3
#if USE_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * CMPXCHG_LOOP(CODE, SUCCESS) - attempt a lockless lockref update.
 *
 * A lockref packs the spinlock and the reference count into a single
 * 64-bit word (enforced by the BUILD_BUG_ON below), so both can be
 * updated atomically with one 64-bit cmpxchg.
 *
 * We snapshot the word, and as long as the snapshot shows the spinlock
 * as unlocked, we run CODE against a private copy ("new") and try to
 * install it.  If the cmpxchg succeeds, SUCCESS runs (typically a
 * "return" out of the calling function).
 *
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case: on contention "old" already holds the current word, so
 * the loop retries directly - but only while the lock portion still
 * reads as unlocked.  If someone holds the spinlock we fall out of the
 * loop, and the caller drops to its spinlocked slow path.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		cpu_relax_lowlatency();						\
	}									\
} while (0)

#else

/*
 * No usable 64-bit cmpxchg: the fast path compiles away entirely and
 * every caller falls straight through to its spinlocked slow path.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
40
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	/* Fast path: bump the count with a single lockless cmpxchg. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/*
	 * Slow path: the lock was observed held (or cmpxchg is not
	 * available), so take the spinlock and update under it.
	 */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
61
/**
 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 *
 * "Dead" is a negative count (see lockref_mark_dead()), so the
 * "<= 0" test below rejects both the zero and the dead case.
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	/* Fast path: lockless increment, bailing out on zero/dead. */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			return 0;
	,
		return 1;
	);

	/* Slow path: re-check the count under the spinlock. */
	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
89
/**
 * lockref_get_or_lock - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 *
 * NOTE: on a 0 return the spinlock is HELD; the caller is responsible
 * for releasing it (typically after marking the object dead or
 * re-initializing the count).
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	/*
	 * Fast path: lockless increment.  "break" (rather than
	 * "return 0") drops us into the slow path so the zero/dead
	 * case is decided with the lock actually held.
	 */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 0)
		return 0;	/* lock intentionally left held */
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
114
/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 *
 * Note there is no spinlocked slow path here: when USE_CMPXCHG_LOCKREF
 * is not set, CMPXCHG_LOOP() is a no-op and this always returns -1,
 * so callers must treat -1 as "try again under the lock", not as a
 * definitive failure.
 */
int lockref_put_return(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	/* Lock held, lockref dead, or no cmpxchg support. */
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
134
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 *
 * NOTE: on a 0 return the spinlock is HELD with the count still at its
 * old value (<= 1); the caller decides how to dispose of the last
 * reference and must release the lock.
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	/*
	 * Fast path: lockless decrement.  "break" defers the
	 * last-reference (count <= 1) case to the locked slow path.
	 */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;	/* lock intentionally left held */
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
158
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 *
 * Must be called with the lockref's spinlock held.  A dead lockref
 * has a negative count; lockref_get_not_dead() and the "<= 0" / "< 0"
 * checks in the other helpers key off this.
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	/*
	 * -128 rather than -1 so that modest over/under-flow bugs in
	 * callers still leave the count visibly negative (dead).
	 */
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
169
/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 *
 * Unlike lockref_get_not_zero(), a zero count is acceptable here:
 * only a negative (dead) count blocks the increment.
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	/* Fast path: lockless increment, bailing out only when dead. */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count < 0)
			return 0;
	,
		return 1;
	);

	/* Slow path: re-check for death under the spinlock. */
	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);