/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

struct gfs2_gl_hash_bucket {
        struct hlist_head hb_list;
};

struct gfs2_glock_iter {
        int hash;                       /* hash bucket index         */
        struct gfs2_sbd *sdp;           /* incore superblock         */
        struct gfs2_glock *gl;          /* current glock struct      */
        char string[512];               /* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct task_struct *scand_process;
static unsigned int scand_secs = 5;
static struct workqueue_struct *glock_workqueue;

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
        defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return NULL;
}
#endif

/**
 * gl_hash() - Turn a lock name into a hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
                            const struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(u64), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}
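
/*
 * Note: gl_hash_table is a single global array shared by all mounted
 * GFS2 filesystems, which is why the superblock pointer is mixed into
 * the hash above; search_bucket() still compares gl_sbd, the hash just
 * spreads identical lock numbers from different mounts across chains.
 */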

/**
 * glock_free() - Release a struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls the lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                sdp->sd_lockstruct.ls_ops->lm_put_lock(gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

static void gfs2_glock_hold(struct gfs2_glock *gl)
{
        atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        int rv = 0;

        write_lock(gl_lock_addr(gl->gl_hash));
        if (atomic_dec_and_test(&gl->gl_ref)) {
                hlist_del(&gl->gl_list);
                write_unlock(gl_lock_addr(gl->gl_hash));
                GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_UNLOCKED);
                GLOCK_BUG_ON(gl, !list_empty(&gl->gl_reclaim));
                GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(gl_lock_addr(gl->gl_hash));
out:
        return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket index
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
                                        const struct gfs2_sbd *sdp,
                                        const struct lm_lockname *name)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;

        hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;
                if (gl->gl_sbd != sdp)
                        continue;

                atomic_inc(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
                                          const struct lm_lockname *name)
{
        unsigned int hash = gl_hash(sdp, name);
        struct gfs2_glock *gl;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, name);
        read_unlock(gl_lock_addr(hash));

        return gl;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
        const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
        if ((gh->gh_state == LM_ST_EXCLUSIVE ||
             gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
                return 0;
        if (gl->gl_state == gh->gh_state)
                return 1;
        if (gh->gh_flags & GL_EXACT)
                return 0;
        if (gh->gh_state == LM_ST_SHARED && gl->gl_state == LM_ST_EXCLUSIVE)
                return 1;
        if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
                return 1;
        return 0;
}
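
/*
 * A summary of the tests in may_grant() above, as a reading aid only:
 *
 *  - if either this request or the queue head wants LM_ST_EXCLUSIVE,
 *    only the holder at the head of the queue may be granted;
 *  - a request for the state the glock is already in is grantable;
 *  - otherwise GL_EXACT rejects anything but an exact state match;
 *  - a shared request is compatible with an exclusive lock;
 *  - LM_FLAG_ANY is satisfied by any state except LM_ST_UNLOCKED.
 */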

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: true if there is a blocked holder at the head of the list
 */

static int do_promote(struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh, *tmp;
        int ret;

restart:
        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
                if (may_grant(gl, gh)) {
                        if (gh->gh_list.prev == &gl->gl_holders &&
                            glops->go_lock) {
                                spin_unlock(&gl->gl_spin);
                                /* FIXME: eliminate this eventually */
                                ret = glops->go_lock(gh);
                                spin_lock(&gl->gl_spin);
                                if (ret) {
                                        gh->gh_error = ret;
                                        list_del_init(&gh->gh_list);
                                        gfs2_holder_wake(gh);
                                        goto restart;
                                }
                                set_bit(HIF_HOLDER, &gh->gh_iflags);
                                gfs2_holder_wake(gh);
                                goto restart;
                        }
                        set_bit(HIF_HOLDER, &gh->gh_iflags);
                        gfs2_holder_wake(gh);
                        continue;
                }
                if (gh->gh_list.prev == &gl->gl_holders)
                        return 1;
                break;
        }
        return 0;
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the lock module, or 0 to fail queued try locks
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
        struct gfs2_holder *gh, *tmp;

        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
                if (ret & LM_OUT_ERROR)
                        gh->gh_error = -EIO;
                else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
                        gh->gh_error = GLR_TRYFAILED;
                else
                        continue;
                list_del_init(&gh->gh_list);
                gfs2_holder_wake(gh);
        }
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
                        return gh;
        }
        return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
        gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh;
        unsigned state = ret & LM_OUT_ST_MASK;

        spin_lock(&gl->gl_spin);
        state_change(gl, state);
        gh = find_first_waiter(gl);

        /* Demote to UN request arrived during demote to SH or DF */
        if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
            state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
                gl->gl_target = LM_ST_UNLOCKED;

        /* Check for state != intended state */
        if (unlikely(state != gl->gl_target)) {
                if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
                        /* move to back of queue and try next entry */
                        if (ret & LM_OUT_CANCELED) {
                                if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
                                        list_move_tail(&gh->gh_list, &gl->gl_holders);
                                gh = find_first_waiter(gl);
                                gl->gl_target = gh->gh_state;
                                goto retry;
                        }
                        /* Some error or failed "try lock" - report it */
                        if ((ret & LM_OUT_ERROR) ||
                            (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
                                gl->gl_target = gl->gl_state;
                                do_error(gl, ret);
                                goto out;
                        }
                }
                switch(state) {
                /* Unlocked due to conversion deadlock, try again */
                case LM_ST_UNLOCKED:
retry:
                        do_xmote(gl, gh, gl->gl_target);
                        break;
                /* Conversion fails, unlock and try again */
                case LM_ST_SHARED:
                case LM_ST_DEFERRED:
                        do_xmote(gl, gh, LM_ST_UNLOCKED);
                        break;
                default: /* Everything else */
                        printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
                        GLOCK_BUG_ON(gl, 1);
                }
                spin_unlock(&gl->gl_spin);
                gfs2_glock_put(gl);
                return;
        }

        /* Fast path - we got what we asked for */
        if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
                gfs2_demote_wake(gl);
        if (state != LM_ST_UNLOCKED) {
                if (glops->go_xmote_bh) {
                        int rv;
                        spin_unlock(&gl->gl_spin);
                        rv = glops->go_xmote_bh(gl, gh);
                        if (rv == -EAGAIN)
                                return;
                        spin_lock(&gl->gl_spin);
                        if (rv) {
                                do_error(gl, rv);
                                goto out;
                        }
                }
                do_promote(gl);
        }
out:
        clear_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);
        gfs2_glock_put(gl);
}

static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
                                 unsigned int cur_state, unsigned int req_state,
                                 unsigned int flags)
{
        int ret = LM_OUT_ERROR;
        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
                                                         req_state, flags);
        return ret;
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The glock
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        unsigned int lck_flags = gh ? gh->gh_flags : 0;
        int ret;

        lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
                      LM_FLAG_PRIORITY);
        BUG_ON(gl->gl_state == target);
        BUG_ON(gl->gl_state == gl->gl_target);
        if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
            glops->go_inval) {
                set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
                do_error(gl, 0); /* Fail queued try locks */
        }
        spin_unlock(&gl->gl_spin);
        if (glops->go_xmote_th)
                glops->go_xmote_th(gl);
        if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
                glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
        clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

        gfs2_glock_hold(gl);
        if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
            gl->gl_state == LM_ST_DEFERRED) &&
            !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
                lck_flags |= LM_FLAG_TRY_1CB;
        ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, target, lck_flags);

        if (!(ret & LM_OUT_ASYNC)) {
                finish_xmote(gl, ret);
                gfs2_glock_hold(gl);
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gfs2_glock_put(gl);
        } else {
                GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
        }
        spin_lock(&gl->gl_spin);
}
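
/*
 * Note the forced LM_FLAG_TRY_1CB above when converting between two
 * locked states: a straight conversion can deadlock in the DLM, so it
 * is issued as a "try" lock instead. If it fails or is cancelled,
 * finish_xmote() drops the lock to LM_ST_UNLOCKED and retries from
 * there (the "conversion deadlock" case in its switch statement).
 */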

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;

        if (!list_empty(&gl->gl_holders)) {
                gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        return gh;
        }
        return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
{
        struct gfs2_holder *gh = NULL;

        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                return;

        GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

        if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
            gl->gl_demote_state != gl->gl_state) {
                if (find_first_holder(gl))
                        goto out;
                if (nonblock)
                        goto out_sched;
                set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
                gl->gl_target = gl->gl_demote_state;
        } else {
                if (test_bit(GLF_DEMOTE, &gl->gl_flags))
                        gfs2_demote_wake(gl);
                if (do_promote(gl) == 0)
                        goto out;
                gh = find_first_waiter(gl);
                gl->gl_target = gh->gh_state;
                if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
                        do_error(gl, 0); /* Fail queued try locks */
        }
        do_xmote(gl, gh, gl->gl_target);
        return;

out_sched:
        gfs2_glock_hold(gl);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
out:
        clear_bit(GLF_LOCK, &gl->gl_flags);
}
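
/*
 * GLF_LOCK serves as the mutex for the state machine above: whichever
 * context wins the test_and_set_bit() owns the glock state until the
 * transition completes, and the bit is cleared again either on the
 * out: paths above or in finish_xmote() once the DLM has replied.
 */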

static void glock_work_func(struct work_struct *work)
{
        unsigned long delay = 0;
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);

        if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
                finish_xmote(gl, gl->gl_reply);
        spin_lock(&gl->gl_spin);
        if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags)) {
                unsigned long holdtime, now = jiffies;
                holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
                if (time_before(now, holdtime))
                        delay = holdtime - now;
                set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
        }
        run_queue(gl, 0);
        spin_unlock(&gl->gl_spin);
        if (!delay ||
            queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

static int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
                     void **lockp)
{
        int error = -EIO;
        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
                                sdp->sd_lockstruct.ls_lockspace, name, lockp);
        return error;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   const struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
        struct gfs2_glock *gl, *tmp;
        unsigned int hash = gl_hash(sdp, &name);
        int error;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, &name);
        read_unlock(gl_lock_addr(hash));

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        gl->gl_flags = 0;
        gl->gl_name = name;
        atomic_set(&gl->gl_ref, 1);
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_target = LM_ST_UNLOCKED;
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        gl->gl_hash = hash;
        gl->gl_ops = glops;
        gl->gl_stamp = jiffies;
        gl->gl_tchange = jiffies;
        gl->gl_object = NULL;
        gl->gl_sbd = sdp;
        gl->gl_aspace = NULL;
        INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(gl_lock_addr(hash));
        tmp = search_bucket(hash, sdp, &name);
        if (tmp) {
                write_unlock(gl_lock_addr(hash));
                glock_free(gl);
                gl = tmp;
        } else {
                hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
                write_unlock(gl_lock_addr(hash));
        }

        *glp = gl;

        return 0;

fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);
fail:
        kmem_cache_free(gfs2_glock_cachep, gl);
        return error;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner_pid = get_pid(task_pid(current));
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_iflags = 0;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        put_pid(gh->gh_owner_pid);
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}

static int just_schedule(void *word)
{
        schedule();
        return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
        might_sleep();
        wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @remote: non-zero if the request came via the lock module
 * @delay: the delay in jiffies before the demote should take effect
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                            int remote, unsigned long delay)
{
        int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

        set_bit(bit, &gl->gl_flags);
        if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
                gl->gl_demote_state = state;
                gl->gl_demote_time = jiffies;
                if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
                    gl->gl_object)
                        gfs2_glock_schedule_for_reclaim(gl);
        } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
                        gl->gl_demote_state != state) {
                gl->gl_demote_state = LM_ST_UNLOCKED;
        }
}
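
/*
 * Demote requests are merged rather than queued: LM_ST_EXCLUSIVE in
 * gl_demote_state means "no demote pending" (see gfs2_demote_wake()),
 * and two pending demotes with conflicting target states collapse to
 * LM_ST_UNLOCKED, which satisfies both of them.
 */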

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        wait_on_holder(gh);
        return gh->gh_error;
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        if (seq) {
                struct gfs2_glock_iter *gi = seq->private;
                /* Bound the output and don't re-interpret it as a format */
                vsnprintf(gi->string, sizeof(gi->string), fmt, args);
                seq_printf(seq, "%s", gi->string);
        } else {
                printk(KERN_ERR " ");
                vprintk(fmt, args);
        }
        va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct list_head *insert_pt = NULL;
        struct gfs2_holder *gh2;
        int try_lock = 0;

        BUG_ON(gh->gh_owner_pid == NULL);
        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
                BUG();

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        try_lock = 1;
                if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
                        goto fail;
        }

        list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
                if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
                    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
                        goto trap_recursive;
                if (try_lock &&
                    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
                    !may_grant(gl, gh)) {
fail:
                        gh->gh_error = GLR_TRYFAILED;
                        gfs2_holder_wake(gh);
                        return;
                }
                if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
                        continue;
                if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
                        insert_pt = &gh2->gh_list;
        }
        if (likely(insert_pt == NULL)) {
                list_add_tail(&gh->gh_list, &gl->gl_holders);
                if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
                        goto do_cancel;
                return;
        }
        list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
        gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
        if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
                spin_unlock(&gl->gl_spin);
                sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
                spin_lock(&gl->gl_spin);
        }
        return;

trap_recursive:
        print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
        printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
        printk(KERN_ERR "lock type: %d req lock state : %d\n",
               gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
        print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
        printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
        printk(KERN_ERR "lock type: %d req lock state : %d\n",
               gh->gh_gl->gl_name.ln_type, gh->gh_state);
        __dump_glock(NULL, gl);
        BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl, 1);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC))
                error = gfs2_glock_wait(gh);

        return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned delay = 0;
        int fast_path = 0;

        spin_lock(&gl->gl_spin);
        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED, 0, 0);

        list_del_init(&gh->gh_list);
        if (find_first_holder(gl) == NULL) {
                if (glops->go_unlock) {
                        GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
                        spin_unlock(&gl->gl_spin);
                        glops->go_unlock(gh);
                        spin_lock(&gl->gl_spin);
                        clear_bit(GLF_LOCK, &gl->gl_flags);
                }
                gl->gl_stamp = jiffies;
                if (list_empty(&gl->gl_holders) &&
                    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
                    !test_bit(GLF_DEMOTE, &gl->gl_flags))
                        fast_path = 1;
        }
        spin_unlock(&gl->gl_spin);
        if (likely(fast_path))
                return;

        gfs2_glock_hold(gl);
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            !test_bit(GLF_DEMOTE, &gl->gl_flags))
                delay = gl->gl_ops->go_min_hold_time;
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        gfs2_glock_dq(gh);
        wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

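/*
 * Typical holder usage, as an illustrative sketch only (real callers
 * also handle GLR_TRYFAILED and GL_ASYNC; gfs2_glock_nq_init() is the
 * gfs2_holder_init() + gfs2_glock_nq() helper from glock.h):
 *
 *      struct gfs2_holder gh;
 *      int error;
 *
 *      error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *      if (error)
 *              return error;
 *      ... access the object protected by the glock ...
 *      gfs2_glock_dq_uninit(&gh);
 */
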
/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                      const struct gfs2_glock_operations *glops,
                      unsigned int state, int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

        if (a->ln_number > b->ln_number)
                return 1;
        if (a->ln_number < b->ln_number)
                return -1;
        BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
        return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}
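
/*
 * Sorting the holders by glock number in nq_m_sync() gives every
 * caller the same global acquisition order, which is what makes
 * acquiring multiple glocks at once deadlock free.
 */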

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        struct gfs2_holder *tmp[4];
        struct gfs2_holder **pph = tmp;
        int error = 0;

        switch(num_gh) {
        case 0:
                return 0;
        case 1:
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        default:
                if (num_gh <= 4)
                        break;
                pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
                if (!pph)
                        return -ENOMEM;
        }

        error = nq_m_sync(num_gh, ghs, pph);

        if (pph != tmp)
                kfree(pph);

        return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release and uninitialize multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}

static int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp)
{
        int error = -EIO;
        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp);
        return error;
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error)
                        return error;
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        gfs2_glock_hold(gl);
        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                        sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }
        gfs2_glock_put(gl);
}

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;
        unsigned long delay = 0;
        unsigned long holdtime;
        unsigned long now = jiffies;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
        if (time_before(now, holdtime))
                delay = holdtime - now;

        spin_lock(&gl->gl_spin);
        handle_callback(gl, state, 1, delay);
        spin_unlock(&gl->gl_spin);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @sdp: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = cb_data;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = data;
                struct gfs2_glock *gl;

                down_read(&gfs2_umount_flush_sem);
                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl))
                        return;
                gl->gl_reply = async->lc_ret;
                set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gfs2_glock_put(gl);
                up_read(&gfs2_umount_flush_sem);
                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        case LM_CB_DROPLOCKS:
                gfs2_gl_hash_clear(sdp, NO_WAIT);
                gfs2_quota_scan(sdp);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int demote = 1;

        if (test_bit(GLF_STICKY, &gl->gl_flags))
                demote = 0;
        else if (glops->go_demote_ok)
                demote = glops->go_demote_ok(gl);

        return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&gl->gl_reclaim)) {
                gfs2_glock_hold(gl);
                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
                atomic_inc(&sdp->sd_reclaim_count);
                spin_unlock(&sdp->sd_reclaim_lock);
                wake_up(&sdp->sd_reclaim_wq);
        } else
                spin_unlock(&sdp->sd_reclaim_lock);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;
        int done_callback = 0;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&sdp->sd_reclaim_list)) {
                spin_unlock(&sdp->sd_reclaim_lock);
                return;
        }
        gl = list_entry(sdp->sd_reclaim_list.next,
                        struct gfs2_glock, gl_reclaim);
        list_del_init(&gl->gl_reclaim);
        spin_unlock(&sdp->sd_reclaim_lock);

        atomic_dec(&sdp->sd_reclaim_count);
        atomic_inc(&sdp->sd_reclaimed);

        spin_lock(&gl->gl_spin);
        if (find_first_holder(gl) == NULL &&
            gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) {
                handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
                done_callback = 1;
        }
        spin_unlock(&gl->gl_spin);
        if (!done_callback ||
            queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the hash bucket index
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
                          unsigned int hash)
{
        struct gfs2_glock *gl, *prev = NULL;
        int has_entries = 0;
        struct hlist_head *head = &gl_hash_table[hash].hb_list;

        read_lock(gl_lock_addr(hash));
        /* Can't use hlist_for_each_entry - don't want prefetch here */
        if (hlist_empty(head))
                goto out;
        gl = list_entry(head->first, struct gfs2_glock, gl_list);
        while(1) {
                if (!sdp || gl->gl_sbd == sdp) {
                        gfs2_glock_hold(gl);
                        read_unlock(gl_lock_addr(hash));
                        if (prev)
                                gfs2_glock_put(prev);
                        prev = gl;
                        examiner(gl);
                        has_entries = 1;
                        read_lock(gl_lock_addr(hash));
                }
                if (gl->gl_list.next == NULL)
                        break;
                gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
        }
out:
        read_unlock(gl_lock_addr(hash));
        if (prev)
                gfs2_glock_put(prev);
        cond_resched();
        return has_entries;
}

/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */

static void scan_glock(struct gfs2_glock *gl)
{
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
                return;
        if (test_bit(GLF_LOCK, &gl->gl_flags))
                return;

        spin_lock(&gl->gl_spin);
        if (find_first_holder(gl) == NULL &&
            gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                gfs2_glock_schedule_for_reclaim(gl);
        spin_unlock(&gl->gl_spin);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int released;

        spin_lock(&sdp->sd_reclaim_lock);
        if (!list_empty(&gl->gl_reclaim)) {
                list_del_init(&gl->gl_reclaim);
                atomic_dec(&sdp->sd_reclaim_count);
                spin_unlock(&sdp->sd_reclaim_lock);
                released = gfs2_glock_put(gl);
                gfs2_assert(sdp, !released);
        } else {
                spin_unlock(&sdp->sd_reclaim_lock);
        }

        spin_lock(&gl->gl_spin);
        if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED)
                handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
        spin_unlock(&gl->gl_spin);
        gfs2_glock_hold(gl);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
        unsigned long t;
        unsigned int x;
        int cont;

        t = jiffies;

        for (;;) {
                cont = 0;
                for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                        if (examine_bucket(clear_glock, sdp, x))
                                cont = 1;
                }

                if (!wait || !cont)
                        break;

                if (time_after_eq(jiffies,
                                  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
                        fs_warn(sdp, "Unmount seems to be stalled. "
                                     "Dumping lock state...\n");
                        gfs2_dump_lockstate(sdp);
                        t = jiffies;
                }

                down_write(&gfs2_umount_flush_sem);
                invalidate_inodes(sdp->sd_vfs);
                up_write(&gfs2_umount_flush_sem);
                msleep(10);
        }
}

static const char *state2str(unsigned state)
{
        switch(state) {
        case LM_ST_UNLOCKED:
                return "UN";
        case LM_ST_SHARED:
                return "SH";
        case LM_ST_DEFERRED:
                return "DF";
        case LM_ST_EXCLUSIVE:
                return "EX";
        }
        return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
        char *p = buf;
        if (flags & LM_FLAG_TRY)
                *p++ = 't';
        if (flags & LM_FLAG_TRY_1CB)
                *p++ = 'T';
        if (flags & LM_FLAG_NOEXP)
                *p++ = 'e';
        if (flags & LM_FLAG_ANY)
                *p++ = 'a';
        if (flags & LM_FLAG_PRIORITY)
                *p++ = 'p';
        if (flags & GL_ASYNC)
                *p++ = 'a';
        if (flags & GL_EXACT)
                *p++ = 'E';
        if (flags & GL_ATIME)
                *p++ = 'a';
        if (flags & GL_NOCACHE)
                *p++ = 'c';
        if (test_bit(HIF_HOLDER, &iflags))
                *p++ = 'H';
        if (test_bit(HIF_WAIT, &iflags))
                *p++ = 'W';
        if (test_bit(HIF_FIRST, &iflags))
                *p++ = 'F';
        *p = 0;
        return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
        struct task_struct *gh_owner = NULL;
        char buffer[KSYM_SYMBOL_LEN];
        char flags_buf[32];

        sprint_symbol(buffer, gh->gh_ip);
        if (gh->gh_owner_pid)
                gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
        gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %s\n",
                  state2str(gh->gh_state),
                  hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
                  gh->gh_error,
                  gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
                  gh_owner ? gh_owner->comm : "(ended)", buffer);
        return 0;
}

static const char *gflags2str(char *buf, const unsigned long *gflags)
{
        char *p = buf;
        if (test_bit(GLF_LOCK, gflags))
                *p++ = 'l';
        if (test_bit(GLF_STICKY, gflags))
                *p++ = 's';
        if (test_bit(GLF_DEMOTE, gflags))
                *p++ = 'D';
        if (test_bit(GLF_PENDING_DEMOTE, gflags))
                *p++ = 'd';
        if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
                *p++ = 'p';
        if (test_bit(GLF_DIRTY, gflags))
                *p++ = 'y';
        if (test_bit(GLF_LFLUSH, gflags))
                *p++ = 'f';
        if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
                *p++ = 'i';
        if (test_bit(GLF_REPLY_PENDING, gflags))
                *p++ = 'r';
        *p = 0;
        return buf;
}

1636 /**
1637  * __dump_glock - print information about a glock
1638  * @seq: The seq_file struct
1639  * @gl: the glock
1640  *
1641  * The file format is as follows:
1642  * One line per object; a capital letter indicates the object type:
1643  * G = glock, I = inode, R = rgrp, H = holder. Glocks are not indented;
1644  * other objects are indented by a single space and follow the glock to
1645  * which they relate. Fields are indicated by lower case letters
1646  * followed by a colon and the field value, except for strings, which
1647  * are enclosed in [] so that it's possible to see whether they are
1648  * composed of spaces. The fields are n = number (id of the object),
1649  * f = flags, t = type, s = state, r = refcount, e = error, p = pid.
1650  *
1651  * Returns: 0 on success, -ENOBUFS when we run out of space
1652  */
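
/*
 * A made-up example of the resulting format (values are illustrative
 * only, not from a real trace):
 *
 *   G:  s:SH n:2/802 f:y t:SH d:EX/0 l:0 a:0 r:4
 *    H: s:SH f:H e:0 p:3154 [df] gfs2_inode_lookup+0x...
 *
 * The indented H: line describes a holder of the G: glock above it.
 */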
1653
1654 static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1655 {
1656         const struct gfs2_glock_operations *glops = gl->gl_ops;
1657         unsigned long long dtime;
1658         const struct gfs2_holder *gh;
1659         char gflags_buf[32];
1660         int error = 0;
1661
1662         dtime = jiffies - gl->gl_demote_time;
1663         dtime *= 1000000/HZ; /* demote time in uSec */
1664         if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1665                 dtime = 0;
1666         gfs2_print_dbg(seq, "G:  s:%s n:%u/%llu f:%s t:%s d:%s/%llu l:%d a:%d r:%d\n",
1667                   state2str(gl->gl_state),
1668                   gl->gl_name.ln_type,
1669                   (unsigned long long)gl->gl_name.ln_number,
1670                   gflags2str(gflags_buf, &gl->gl_flags),
1671                   state2str(gl->gl_target),
1672                   state2str(gl->gl_demote_state), dtime,
1673                   atomic_read(&gl->gl_lvb_count),
1674                   atomic_read(&gl->gl_ail_count),
1675                   atomic_read(&gl->gl_ref));
1676
1677         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1678                 error = dump_holder(seq, gh);
1679                 if (error)
1680                         goto out;
1681         }
1682         if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1683                 error = glops->go_dump(seq, gl);
1684 out:
1685         return error;
1686 }
1687
1688 static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1689 {
1690         int ret;
1691         spin_lock(&gl->gl_spin);
1692         ret = __dump_glock(seq, gl);
1693         spin_unlock(&gl->gl_spin);
1694         return ret;
1695 }
1696
1697 /**
1698  * gfs2_dump_lockstate - print out the current lockstate
1699  * @sdp: the filesystem
1700  *
1701  * Dumps the state of every glock belonging to @sdp to the console.
1703  *
1704  */
1705
1706 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
1707 {
1708         struct gfs2_glock *gl;
1709         struct hlist_node *h;
1710         unsigned int x;
1711         int error = 0;
1712
1713         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1714
1715                 read_lock(gl_lock_addr(x));
1716
1717                 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
1718                         if (gl->gl_sbd != sdp)
1719                                 continue;
1720
1721                         error = dump_glock(NULL, gl);
1722                         if (error)
1723                                 break;
1724                 }
1725
1726                 read_unlock(gl_lock_addr(x));
1727
1728                 if (error)
1729                         break;
1730         }
1731
1733         return error;
1734 }
1735
1736 /**
1737  * gfs2_scand - Look for cached glocks and inodes to toss from memory
1738  * @sdp: Pointer to GFS2 superblock
1739  *
1740  * One of these daemons runs, finding candidates to add to sd_reclaim_list.
1741  * See gfs2_glockd()
1742  */
1743
1744 static int gfs2_scand(void *data)
1745 {
1746         unsigned x;
1747         unsigned delay;
1748
1749         while (!kthread_should_stop()) {
1750                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1751                         examine_bucket(scan_glock, NULL, x);
1752                 if (freezing(current))
1753                         refrigerator();
1754                 delay = scand_secs;
1755                 if (delay < 1)
1756                         delay = 1;
1757                 schedule_timeout_interruptible(delay * HZ);
1758         }
1759
1760         return 0;
1761 }
1762
1765 int __init gfs2_glock_init(void)
1766 {
1767         unsigned i;
1768         for (i = 0; i < GFS2_GL_HASH_SIZE; i++) {
1769                 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
1770         }
1771 #ifdef GL_HASH_LOCK_SZ
1772         for (i = 0; i < GL_HASH_LOCK_SZ; i++) {
1773                 rwlock_init(&gl_hash_locks[i]);
1774         }
1775 #endif
1776
1777         scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
1778         if (IS_ERR(scand_process))
1779                 return PTR_ERR(scand_process);
1780
1781         glock_workqueue = create_workqueue("glock_workqueue");
1782         if (!glock_workqueue) {         /* NULL on failure, never an ERR_PTR */
1783                 kthread_stop(scand_process);
1784                 return -ENOMEM;
1785         }
1786
1787         return 0;
1788 }
1789
1790 void gfs2_glock_exit(void)
1791 {
1792         destroy_workqueue(glock_workqueue);
1793         kthread_stop(scand_process);
1794 }
1795
1796 module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
1797 MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
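
/*
 * Since scand_secs is declared with S_IWUSR, the scan interval can be
 * changed at runtime as well as at module load time; assuming the usual
 * module sysfs layout, for example:
 *
 *   # modprobe gfs2 scand_secs=10
 *   # echo 30 > /sys/module/gfs2/parameters/scand_secs
 */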
1798
1799 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
1800 {
1801         struct gfs2_glock *gl;
1802
1803 restart:
1804         read_lock(gl_lock_addr(gi->hash));
1805         gl = gi->gl;
1806         if (gl) {
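                /* This relies on gl_list being the first member of
                 * struct gfs2_glock (an assumption about incore.h, not
                 * checked here), so that hlist_entry() turns a NULL
                 * ->next back into a NULL gi->gl rather than a bogus
                 * pointer. */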
1807                 gi->gl = hlist_entry(gl->gl_list.next,
1808                                      struct gfs2_glock, gl_list);
1809                 if (gi->gl)
1810                         gfs2_glock_hold(gi->gl);
1811         }
1812         read_unlock(gl_lock_addr(gi->hash));
1813         if (gl)
1814                 gfs2_glock_put(gl);
1815         if (gl && gi->gl == NULL)
1816                 gi->hash++;
1817         while (gi->gl == NULL) {
1818                 if (gi->hash >= GFS2_GL_HASH_SIZE)
1819                         return 1;
1820                 read_lock(gl_lock_addr(gi->hash));
1821                 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
1822                                      struct gfs2_glock, gl_list);
1823                 if (gi->gl)
1824                         gfs2_glock_hold(gi->gl);
1825                 read_unlock(gl_lock_addr(gi->hash));
1826                 gi->hash++;
1827         }
1828
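        /* The hash table is global, shared by every mounted gfs2
         * filesystem, so skip glocks that belong to a different
         * superblock than the one being iterated. */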
1829         if (gi->sdp != gi->gl->gl_sbd)
1830                 goto restart;
1831
1832         return 0;
1833 }
1834
1835 static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
1836 {
1837         if (gi->gl)
1838                 gfs2_glock_put(gi->gl);
1839         gi->gl = NULL;
1840 }
1841
1842 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1843 {
1844         struct gfs2_glock_iter *gi = seq->private;
1845         loff_t n = *pos;
1846
1847         gi->hash = 0;
1848
1849         do {
1850                 if (gfs2_glock_iter_next(gi)) {
1851                         gfs2_glock_iter_free(gi);
1852                         return NULL;
1853                 }
1854         } while (n--);
1855
1856         return gi->gl;
1857 }
1858
1859 static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1860                                  loff_t *pos)
1861 {
1862         struct gfs2_glock_iter *gi = seq->private;
1863
1864         (*pos)++;
1865
1866         if (gfs2_glock_iter_next(gi)) {
1867                 gfs2_glock_iter_free(gi);
1868                 return NULL;
1869         }
1870
1871         return gi->gl;
1872 }
1873
1874 static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1875 {
1876         struct gfs2_glock_iter *gi = seq->private;
1877         gfs2_glock_iter_free(gi);
1878 }
1879
1880 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
1881 {
1882         return dump_glock(seq, iter_ptr);
1883 }
1884
1885 static const struct seq_operations gfs2_glock_seq_ops = {
1886         .start = gfs2_glock_seq_start,
1887         .next  = gfs2_glock_seq_next,
1888         .stop  = gfs2_glock_seq_stop,
1889         .show  = gfs2_glock_seq_show,
1890 };
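
/*
 * seq_read() drives the iterator above: ->start() positions at *pos,
 * ->show() prints one glock, ->next() advances, and ->stop() runs when
 * the walk ends or the output buffer fills. Because a read can stop
 * part-way through, gfs2_glock_iter_next() holds a reference on the
 * current glock so that it remains valid between calls.
 */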
1891
1892 static int gfs2_debugfs_open(struct inode *inode, struct file *file)
1893 {
1894         int ret = seq_open_private(file, &gfs2_glock_seq_ops,
1895                                    sizeof(struct gfs2_glock_iter));
1896         if (ret == 0) {
1897                 struct seq_file *seq = file->private_data;
1898                 struct gfs2_glock_iter *gi = seq->private;
1899                 gi->sdp = inode->i_private;
1900         }
1901         return ret;
1902 }
1903
1904 static const struct file_operations gfs2_debug_fops = {
1905         .owner   = THIS_MODULE,
1906         .open    = gfs2_debugfs_open,
1907         .read    = seq_read,
1908         .llseek  = seq_lseek,
1909         .release = seq_release_private,
1910 };
1911
1912 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
1913 {
1914         sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
1915         if (!sdp->debugfs_dir)
1916                 return -ENOMEM;
1917         sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
1918                                                          S_IFREG | S_IRUGO,
1919                                                          sdp->debugfs_dir, sdp,
1920                                                          &gfs2_debug_fops);
1921         if (!sdp->debugfs_dentry_glocks)
1922                 return -ENOMEM;
1923
1924         return 0;
1925 }
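
/*
 * With debugfs mounted in the conventional location (an assumption
 * about the local setup, not something this code controls), the file
 * created above can be read directly:
 *
 *   # mount -t debugfs none /sys/kernel/debug
 *   # cat /sys/kernel/debug/gfs2/<table_name>/glocks
 *
 * where <table_name> is the sd_table_name used for the directory.
 */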
1926
1927 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
1928 {
1929         if (sdp && sdp->debugfs_dir) {
1930                 if (sdp->debugfs_dentry_glocks) {
1931                         debugfs_remove(sdp->debugfs_dentry_glocks);
1932                         sdp->debugfs_dentry_glocks = NULL;
1933                 }
1934                 debugfs_remove(sdp->debugfs_dir);
1935                 sdp->debugfs_dir = NULL;
1936         }
1937 }
1938
1939 int gfs2_register_debugfs(void)
1940 {
1941         gfs2_root = debugfs_create_dir("gfs2", NULL);
1942         return gfs2_root ? 0 : -ENOMEM;
1943 }
1944
1945 void gfs2_unregister_debugfs(void)
1946 {
1947         debugfs_remove(gfs2_root);
1948         gfs2_root = NULL;
1949 }