/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

/*
 * GFS2-internal headers (assumed from the usual fs/gfs2 tree layout);
 * they provide the gfs2_* helpers and types used below.
 */
#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

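/*
 * This file defines the per-lock-type glock operations ("glops"): the
 * sync, invalidate, demote-ok and lock/unlock callbacks that the core
 * glock code dispatches through the gfs2_glock_operations tables at the
 * bottom of this file.
 */
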
/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        unsigned int blocks;
        struct list_head *head = &gl->gl_ail_list;
        struct gfs2_bufdata *bd;
        struct buffer_head *bh;
        u64 blkno;
        int error;

        blocks = atomic_read(&gl->gl_ail_count);
        if (!blocks)
                return;

        error = gfs2_trans_begin(sdp, 0, blocks);
        if (gfs2_assert_withdraw(sdp, !error))
                return;

        gfs2_log_lock(sdp);
        while (!list_empty(head)) {
                bd = list_entry(head->next, struct gfs2_bufdata,
                                bd_ail_gl_list);
                bh = bd->bd_bh;
                blkno = bh->b_blocknr;
                gfs2_assert_withdraw(sdp, !buffer_busy(bh));

                bd->bd_ail = NULL;
                list_del(&bd->bd_ail_st_list);
                list_del(&bd->bd_ail_gl_list);
                atomic_dec(&gl->gl_ail_count);
                brelse(bh);
                gfs2_log_unlock(sdp);

                /* The log lock is dropped while the revoke is written */
                gfs2_trans_add_revoke(sdp, blkno);

                gfs2_log_lock(sdp);
        }
        gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
        gfs2_log_unlock(sdp);

        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL);
}

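/*
 * The helpers below sync or invalidate an inode's page tables and page
 * cache; they are the building blocks for the inode glock operations
 * further down.
 */
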
/**
 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
 * @gl: the glock
 */

static void gfs2_pte_inval(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip;
        struct inode *inode;

        ip = gl->gl_object;
        inode = &ip->i_inode;
        if (!ip || !S_ISREG(inode->i_mode))
                return;

        if (!test_bit(GIF_PAGED, &ip->i_flags))
                return;

        unmap_shared_mapping_range(inode->i_mapping, 0, 0);

        if (test_bit(GIF_SW_PAGED, &ip->i_flags))
                set_bit(GLF_DIRTY, &gl->gl_flags);

        clear_bit(GIF_SW_PAGED, &ip->i_flags);
}

/**
 * gfs2_page_inval - Invalidate all pages associated with a glock
 * @gl: the glock
 */

static void gfs2_page_inval(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip;
        struct inode *inode;

        ip = gl->gl_object;
        inode = &ip->i_inode;
        if (!ip || !S_ISREG(inode->i_mode))
                return;

        truncate_inode_pages(inode->i_mapping, 0);
        gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !inode->i_mapping->nrpages);
        clear_bit(GIF_PAGED, &ip->i_flags);
}

/**
 * gfs2_page_wait - Wait for writeback of data
 * @gl: the glock
 *
 * Syncs data (not metadata) for a regular file.
 * No-op for all other types.
 */

static void gfs2_page_wait(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip = gl->gl_object;
        struct inode *inode = &ip->i_inode;
        struct address_space *mapping = inode->i_mapping;
        int error;

        if (!S_ISREG(inode->i_mode))
                return;

        error = filemap_fdatawait(mapping);

        /* Put back any errors cleared by filemap_fdatawait()
           so they can be caught by someone who can pass them
           up to user space. */

        if (error == -ENOSPC)
                set_bit(AS_ENOSPC, &mapping->flags);
        else if (error)
                set_bit(AS_EIO, &mapping->flags);
}

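/**
 * gfs2_page_writeback - Start writeback of an inode's dirty data pages
 * @gl: the glock protecting the inode
 *
 * Like gfs2_page_wait(), this is a no-op for anything but a regular file.
 */
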
static void gfs2_page_writeback(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip = gl->gl_object;
        struct inode *inode = &ip->i_inode;
        struct address_space *mapping = inode->i_mapping;

        if (!S_ISREG(inode->i_mode))
                return;

        filemap_fdatawrite(mapping);
}

/**
 * meta_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must
 * not return to caller to demote/unlock the glock until I/O is complete.
 */

static void meta_go_sync(struct gfs2_glock *gl)
{
        if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
                gfs2_log_flush(gl->gl_sbd, gl);
                gfs2_meta_sync(gl);
                gfs2_ail_empty_gl(gl);
        }
}

/**
 * meta_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* flags describing what to invalidate
 */

static void meta_go_inval(struct gfs2_glock *gl, int flags)
{
        if (!(flags & DIO_METADATA))
                return;

        gfs2_meta_inval(gl);
        gl->gl_vn++;
}

/**
 * inode_go_xmote_th - promote/demote a glock
 * @gl: the glock
 * @state: the requested state
 * @flags:
 */

static void inode_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
                              int flags)
{
        if (gl->gl_state != LM_ST_UNLOCKED)
                gfs2_pte_inval(gl);
        gfs2_glock_xmote_th(gl, state, flags);
}

/**
 * inode_go_xmote_bh - After promoting/demoting a glock
 * @gl: the glock
 */

static void inode_go_xmote_bh(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh = gl->gl_req_gh;
        struct buffer_head *bh;
        int error;

        if (gl->gl_state != LM_ST_UNLOCKED &&
            (!gh || !(gh->gh_flags & GL_SKIP))) {
                /* Read in the inode's disk block so it is cached
                   once the lock is held */
                error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh);
                if (!error)
                        brelse(bh);
        }
}

/**
 * inode_go_drop_th - unlock a glock
 * @gl: the glock
 *
 * Invoked from rq_demote().
 * Another node needs the lock in EXCLUSIVE mode, or the lock has gone
 * unused for too long and is being purged from our node's glock cache;
 * either way, we're dropping the lock.
 */

static void inode_go_drop_th(struct gfs2_glock *gl)
{
        gfs2_pte_inval(gl);
        gfs2_glock_drop_th(gl);
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
        if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
                gfs2_page_writeback(gl);
                gfs2_log_flush(gl->gl_sbd, gl);
                gfs2_meta_sync(gl);
                gfs2_page_wait(gl);
                clear_bit(GLF_DIRTY, &gl->gl_flags);
                gfs2_ail_empty_gl(gl);
        }
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* flags describing what to invalidate
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
        int meta = (flags & DIO_METADATA);

        if (meta) {
                struct gfs2_inode *ip = gl->gl_object;
                gfs2_meta_inval(gl);
                set_bit(GIF_INVALID, &ip->i_flags);
        }

        if (flags & DIO_DATA)
                gfs2_page_inval(gl);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int demote = 0;

        if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages)
                demote = 1;
        else if (!sdp->sd_args.ar_localcaching &&
                 time_after_eq(jiffies, gl->gl_stamp +
                               gfs2_tune_get(sdp, gt_demote_secs) * HZ))
                demote = 1;

        return demote;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the holder for the glock being locked
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_inode *ip = gl->gl_object;
        int error = 0;

        if (!ip)
                return 0;

        if (test_bit(GIF_INVALID, &ip->i_flags)) {
                error = gfs2_inode_refresh(ip);
                if (error)
                        return error;
        }

        /* Resume an interrupted truncate if we hold the lock exclusively */
        if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
            (gl->gl_state == LM_ST_EXCLUSIVE) &&
            (gh->gh_flags & GL_LOCAL_EXCL))
                error = gfs2_truncatei_resume(ip);

        return error;
}

/**
 * inode_go_unlock - operation done before an inode lock is unlocked by a
 *                   process
 * @gh: the holder for the glock being unlocked
 */

static void inode_go_unlock(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_inode *ip = gl->gl_object;

        if (ip)
                gfs2_meta_cache_flush(ip);
}

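/**
 * inode_greedy - adapt the inode's "greedy" glock hold interval
 * @gl: the glock
 *
 * Grow the interval by one quantum while page faults keep arriving
 * within a quantum of each other; otherwise shrink it, clamping the
 * result to [1, gt_greedy_max].
 */
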
static void inode_greedy(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_inode *ip = gl->gl_object;
        unsigned int quantum = gfs2_tune_get(sdp, gt_greedy_quantum);
        unsigned int max = gfs2_tune_get(sdp, gt_greedy_max);
        unsigned int new_time;

        spin_lock(&ip->i_spin);

        if (time_after(ip->i_last_pfault + quantum, jiffies)) {
                new_time = ip->i_greedy + quantum;
                if (new_time > max)
                        new_time = max;
        } else {
                new_time = ip->i_greedy - quantum;
                /* catch unsigned underflow or overshoot */
                if (!new_time || new_time > max)
                        new_time = 1;
        }

        ip->i_greedy = new_time;

        spin_unlock(&ip->i_spin);

        iput(&ip->i_inode);
}

/**
 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int rgrp_go_demote_ok(struct gfs2_glock *gl)
{
        return !gl->gl_aspace->i_mapping->nrpages;
}

/**
 * rgrp_go_lock - operation done after an rgrp lock is locked by
 *                a first holder on this node.
 * @gh: the holder for the rgrp glock
 *
 * Returns: errno
 */

static int rgrp_go_lock(struct gfs2_holder *gh)
{
        return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
}

/**
 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
 *                  a last holder on this node.
 * @gh: the holder for the rgrp glock
 */

static void rgrp_go_unlock(struct gfs2_holder *gh)
{
        gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
}

/**
 * trans_go_xmote_th - promote/demote the transaction glock
 * @gl: the glock
 * @state: the requested state
 * @flags:
 */

static void trans_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
                              int flags)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        if (gl->gl_state != LM_ST_UNLOCKED &&
            test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                gfs2_meta_syncfs(sdp);
                gfs2_log_shutdown(sdp);
        }

        gfs2_glock_xmote_th(gl, state, flags);
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 */

static void trans_go_xmote_bh(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
        struct gfs2_glock *j_gl = ip->i_gl;
        struct gfs2_log_header_host head;
        int error;

        if (gl->gl_state != LM_ST_UNLOCKED &&
            test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode));
                j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

                error = gfs2_find_jhead(sdp->sd_jdesc, &head);
                if (error)
                        gfs2_consist(sdp);
                if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
                        gfs2_consist(sdp);

                /* Initialize the head of the log */
                if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
                        sdp->sd_log_sequence = head.lh_sequence + 1;
                        gfs2_log_pointers_init(sdp, head.lh_blkno);
                }
        }
}

/**
 * trans_go_drop_th - unlock the transaction glock
 * @gl: the glock
 *
 * We want to sync the device even with localcaching.  Remember
 * that localcaching journal replay only marks buffers dirty.
 */

static void trans_go_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                gfs2_meta_syncfs(sdp);
                gfs2_log_shutdown(sdp);
        }

        gfs2_glock_drop_th(gl);
}

/**
 * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int quota_go_demote_ok(struct gfs2_glock *gl)
{
        return !atomic_read(&gl->gl_lvb_count);
}

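/*
 * Per-type operation tables.  The core glock code dispatches through
 * these, e.g. gl->gl_ops->go_lock(gh); types that need no special
 * handling fall back to the generic gfs2_glock_xmote_th() and
 * gfs2_glock_drop_th() state-change handlers.
 */
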
const struct gfs2_glock_operations gfs2_meta_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
        .go_xmote_th = inode_go_xmote_th,
        .go_xmote_bh = inode_go_xmote_bh,
        .go_drop_th = inode_go_drop_th,
        .go_sync = inode_go_sync,
        .go_inval = inode_go_inval,
        .go_demote_ok = inode_go_demote_ok,
        .go_lock = inode_go_lock,
        .go_unlock = inode_go_unlock,
        .go_greedy = inode_greedy,
        .go_type = LM_TYPE_INODE,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_sync = meta_go_sync,
        .go_inval = meta_go_inval,
        .go_demote_ok = rgrp_go_demote_ok,
        .go_lock = rgrp_go_lock,
        .go_unlock = rgrp_go_unlock,
        .go_type = LM_TYPE_RGRP,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
        .go_xmote_th = trans_go_xmote_th,
        .go_xmote_bh = trans_go_xmote_bh,
        .go_drop_th = trans_go_drop_th,
        .go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_type = LM_TYPE_IOPEN,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_demote_ok = quota_go_demote_ok,
        .go_type = LM_TYPE_QUOTA,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
        .go_xmote_th = gfs2_glock_xmote_th,
        .go_drop_th = gfs2_glock_drop_th,
        .go_type = LM_TYPE_JOURNAL,
};