/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/marker.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
}
/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page || page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
	return;

nope:
	__brelse(bh);
}
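
/*
 * Usage note (illustrative, not from the original source): the BJ_Forget
 * processing in jbd2_journal_commit_transaction() below hands this
 * function its last reference on the buffer.  Whether or not the page
 * can be stripped, that reference is consumed here, so the caller must
 * not touch bh after this returns.
 */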
/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct journal_head *descriptor;
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	int barrier_done = 0;
	struct timespec now = current_kernel_time();
	if (is_journal_aborted(journal))
		return 0;

	descriptor = jbd2_journal_get_descriptor_buffer(journal);
	if (!descriptor)
		return 1;
	bh = jh2bh(descriptor);

	tmp = (struct commit_header *)bh->b_data;
	tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
	tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
	tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);
	if (JBD2_HAS_COMPAT_FEATURE(journal,
				    JBD2_FEATURE_COMPAT_CHECKSUM)) {
		tmp->h_chksum_type = JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0] = cpu_to_be32(crc32_sum);
	}
	JBUFFER_TRACE(descriptor, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	if (journal->j_flags & JBD2_BARRIER &&
	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		set_buffer_ordered(bh);
		barrier_done = 1;
	}
	ret = submit_bh(WRITE_SYNC, bh);
	if (barrier_done)
		clear_buffer_ordered(bh);
	/*
	 * Is it possible for another commit to fail at roughly the same
	 * time as this one?  If so, we don't want to trust the barrier
	 * flag in the super, but instead want to remember if we sent a
	 * barrier request.
	 */
	if (ret == -EOPNOTSUPP && barrier_done) {
		printk(KERN_WARNING
		       "JBD: barrier-based sync failed on %s - "
		       "disabling barriers\n", journal->j_devname);
		spin_lock(&journal->j_state_lock);
		journal->j_flags &= ~JBD2_BARRIER;
		spin_unlock(&journal->j_state_lock);

		/* And try again, without the barrier */
		lock_buffer(bh);
		set_buffer_uptodate(bh);
		clear_buffer_dirty(bh);
		ret = submit_bh(WRITE_SYNC, bh);
	}
	*cbh = bh;
	return ret;
}
/*
 * This function along with journal_submit_commit_record
 * allows the commit record to be written asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
					 struct buffer_head *bh)
{
	int ret = 0;

retry:
	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (buffer_eopnotsupp(bh) && (journal->j_flags & JBD2_BARRIER)) {
		printk(KERN_WARNING
		       "JBD2: wait_on_commit_record: sync failed on %s - "
		       "disabling barriers\n", journal->j_devname);
		spin_lock(&journal->j_state_lock);
		journal->j_flags &= ~JBD2_BARRIER;
		spin_unlock(&journal->j_state_lock);

		lock_buffer(bh);
		clear_buffer_dirty(bh);
		set_buffer_uptodate(bh);
		bh->b_end_io = journal_end_buffer_io_sync;

		ret = submit_bh(WRITE_SYNC, bh);
		if (ret) {
			unlock_buffer(bh);
			return ret;
		}
		goto retry;
	}

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);			/* One for getblk() */
	jbd2_journal_put_journal_head(bh2jh(bh));

	return ret;
}
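
/*
 * Illustrative call sequence (a sketch; the real callers appear in
 * jbd2_journal_commit_transaction() below).  With the ASYNC_COMMIT
 * feature the commit block is submitted early and waited on late:
 *
 *	struct buffer_head *cbh = NULL;
 *
 *	err = journal_submit_commit_record(journal, commit_transaction,
 *					   &cbh, crc32_sum);
 *	...
 *	if (!err && !is_journal_aborted(journal))
 *		err = journal_wait_on_commit_record(journal, cbh);
 */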
/*
 * Write the filemap data using writepage() address_space_operations.
 * We don't do block allocation here even for delalloc.  We don't
 * use writepages() because with delayed allocation we may be doing
 * block allocation in writepages().
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = 0,
		.range_end = i_size_read(mapping->host),
	};

	ret = generic_writepages(mapping, &wbc);
	return ret;
}
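
/*
 * Note on the writeback_control above (illustrative): WB_SYNC_ALL asks
 * for data-integrity writeback, so locked pages are waited on rather
 * than skipped; nr_to_write = 2 * nrpages leaves enough budget to cover
 * every dirty page even if some are redirtied during the walk; and
 * range_end caps the walk at i_size, so pages beyond end-of-file are
 * not submitted.
 */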
/*
 * Submit all the data buffers of inodes associated with the transaction
 * to disk.
 *
 * We are in a committing transaction.  Therefore no new inode can be added
 * to our inode list.  We use the JI_COMMIT_RUNNING flag to protect the inode
 * we currently operate on from being released while we write out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;
	struct address_space *mapping;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		mapping = jinode->i_vfs_inode->i_mapping;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		/*
		 * Submit the inode data buffers.  We use writepage
		 * instead of writepages, because writepages can do
		 * block allocation with delalloc.  We need to write
		 * only allocated blocks here.
		 */
		err = journal_submit_inode_data_buffers(mapping);
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}
/*
 * Wait for data submitted for writeout, refile inodes to proper
 * transaction if needed.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
		if (err) {
			/*
			 * Because AS_EIO is cleared by
			 * wait_on_page_writeback_range(), set it again so
			 * that a user process can get -EIO from fsync().
			 */
			set_bit(AS_EIO,
				&jinode->i_vfs_inode->i_mapping->flags);

			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inodes to the proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				&jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
		}
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}
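
/*
 * Example of the refile above (illustrative): if an inode was dirtied
 * again while this transaction was committing, its i_next_transaction
 * points at the running transaction, so the inode is moved onto that
 * transaction's t_inode_list and its new pages get written out by the
 * *next* commit.  Otherwise the inode simply drops off all commit lists.
 */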
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page, KM_USER0);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr, KM_USER0);

	return checksum;
}
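
/*
 * Usage sketch: the commit loop below folds every journal block into a
 * running CRC32 seeded with ~0, e.g.:
 *
 *	__u32 csum = ~0;
 *
 *	for (i = 0; i < bufs; i++)
 *		csum = jbd2_checksum_data(csum, wbuf[i]);
 *
 * The final value ends up in the commit block's h_chksum[0] (see
 * journal_submit_commit_record() above).
 */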
static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
				   unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (tag_bytes > JBD2_TAG_SIZE32)
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}
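
/*
 * Example (illustrative): for a 64-bit block number such as 0x100000002,
 * t_blocknr holds the low 32 bits (0x00000002) and, when tag_bytes
 * indicates 64-bit tags, t_blocknr_high holds the remaining bits (0x1).
 * Shifting by 31 and then by 1, instead of by 32 in one step, presumably
 * keeps the expression well-defined even if "block" were ever narrowed
 * to a 32-bit type on some configuration.
 */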
/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;
	int to_free = 0;
	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

#ifdef COMMIT_STATS
	spin_lock(&journal->j_list_lock);
	summarise_journal_usage(journal);
	spin_unlock(&journal->j_list_lock);
#endif

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		jbd2_journal_update_superblock(journal, 1);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	trace_mark(jbd2_start_commit, "dev %s transaction %d",
		   journal->j_devname, commit_transaction->t_tid);
	jbd_debug(1, "JBD: starting commit of transaction %d\n",
			commit_transaction->t_tid);
	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	stats.u.run.rs_wait = commit_transaction->t_max_wait;
	stats.u.run.rs_locked = jiffies;
	stats.u.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
						stats.u.run.rs_locked);

	spin_lock(&commit_transaction->t_handle_lock);
	while (commit_transaction->t_updates) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (commit_transaction->t_updates) {
			spin_unlock(&commit_transaction->t_handle_lock);
			spin_unlock(&journal->j_state_lock);
			schedule();
			spin_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT(commit_transaction->t_outstanding_credits <=
			journal->j_max_transaction_buffers);
	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}
	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory.
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);

	jbd_debug(3, "JBD: commit phase 1\n");

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	stats.u.run.rs_flushing = jiffies;
	stats.u.run.rs_locked = jbd2_time_diff(stats.u.run.rs_locked,
					       stats.u.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	spin_unlock(&journal->j_state_lock);
	jbd_debug(3, "JBD: commit phase 2\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	jbd2_journal_write_revoke_records(journal, commit_transaction);

	jbd_debug(3, "JBD: commit phase 2\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	spin_unlock(&journal->j_state_lock);

	stats.u.run.rs_logging = jiffies;
	stats.u.run.rs_flushing = jbd2_time_diff(stats.u.run.rs_flushing,
						 stats.u.run.rs_logging);
	stats.u.run.rs_blocks = commit_transaction->t_outstanding_credits;
	stats.u.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 commit_transaction->t_outstanding_credits);
	descriptor = NULL;
	bufs = 0;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}
		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT(bufs == 0);

			jbd_debug(4, "JBD: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* Record it so that we can wait for IO
			   completion later */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			jbd2_journal_file_buffer(descriptor, commit_transaction,
						 BJ_LogCtl);
		}
		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by jbd2_journal_next_log_block() also.
		 */
		commit_transaction->t_outstanding_credits--;

		/* Bump b_count to prevent truncate from stumbling over
		   the shadowed buffer!  @@@ This can go if we ever get
		   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);
		/* Make a temporary IO buffer with which to write it out
		   (this will requeue both the metadata buffer and the
		   temporary IO buffer). new_bh goes on BJ_IO */

		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		/*
		 * akpm: jbd2_journal_write_metadata_buffer() sets
		 * new_bh->b_transaction to commit_transaction.
		 * We need to clean this up before we release new_bh
		 * (which is of type BJ_IO)
		 */
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
							   jh, &new_jh, blocknr);
		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
		wbuf[bufs++] = jh2bh(new_jh);

		/* Record the new block's tag in the current descriptor
		   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be32(tag_flag);
		tagp += tag_bytes;
		space_left -= tag_bytes;

		if (first_tag) {
			memcpy(tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}
		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16) {

			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
			   submitting the IOs.  "tag" still points to
			   the last tag we set up. */

			tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);

start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (JBD2_HAS_COMPAT_FEATURE(journal,
					JBD2_FEATURE_COMPAT_CHECKSUM)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(WRITE, bh);
			}
			cond_resched();
			stats.u.run.rs_blocks_logged += bufs;

			/* Force a new descriptor to be generated next
			   time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}
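
	/*
	 * Rough capacity math for the space_left check above (illustrative,
	 * assuming 4096-byte journal blocks): a descriptor block offers
	 * 4096 - sizeof(journal_header_t) = 4084 bytes of tag space, so
	 * with 12-byte 64-bit tags, plus one 16-byte UUID after the first
	 * tag, a single descriptor can describe roughly 339 metadata
	 * blocks.  The "tag_bytes + 16" margin guarantees room for one
	 * more tag and a worst-case UUID before the descriptor is full.
	 */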
	/* Done it all: now write the commit record asynchronously. */

	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						   &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}

	/*
	 * This is the right place to wait for data buffers both for ASYNC
	 * and !ASYNC commit.  If commit is ASYNC, we need to wait only after
	 * the commit block went to disk (which happens above).  If commit is
	 * SYNC, we need to wait for data buffers before we start writing the
	 * commit block, which happens below in that case.
	 */
	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
			"JBD2: Detected IO errors while flushing file data "
		       "on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}
	/* Lo and behold: we have just managed to send a transaction to
	   the log.  Before we can commit it, wait for the IO so far to
	   complete.  Control buffers being written are on the
	   transaction's t_log_list queue, and metadata buffers are on
	   the t_iobuf_list queue.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	 */

	jbd_debug(3, "JBD: commit phase 3\n");

	/*
	 * akpm: these are BJ_IO, and j_list_lock is not needed.
	 * See __journal_try_to_free_buffer.
	 */
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		jbd2_journal_unfile_buffer(journal, jh);

		/*
		 * ->t_iobuf_list should contain only dummy buffer_heads
		 * which were created by jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);
		/* We also have to unlock and free the corresponding
		   shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_bit(BH_JWrite, &bh->b_state);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but we need
		   to remember it against this transaction so that when
		   we finally commit, we can do any checkpointing
		   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/* Wake up any transactions which were waiting for this
		   IO to complete */
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	jbd_debug(3, "JBD: commit phase 4\n");

	/* Here we wait for the revoke record and descriptor record buffers */
wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_journal_unfile_buffer(journal, jh);
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		jbd2_journal_abort(journal, err);
	jbd_debug(3, "JBD: commit phase 5\n");

	if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						   &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}
	if (!err && !is_journal_aborted(journal))
		err = journal_wait_on_commit_record(journal, cbh);

	if (err)
		jbd2_journal_abort(journal, err);
	/* End of a transaction!  Finally, we can do checkpoint
	   processing: any buffers committed as a result of this
	   transaction can be removed from any checkpoint list they
	   were on before. */

	jbd_debug(3, "JBD: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);
restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
			jh->b_transaction == journal->j_running_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/* A buffer which has been freed while still being
		 * journaled by a previous transaction may end up still
		 * being dirty here, but we want to avoid writing back
		 * that buffer in the future now that the last use has
		 * been committed.  That's not only a performance gain,
		 * it also stops aliasing problems if the buffer is left
		 * behind for writeback and gets reallocated for another
		 * use in a different page. */
		if (buffer_freed(bh)) {
			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
			JBUFFER_TRACE(jh, "refile for checkpoint writeback");
			__jbd2_journal_refile_buffer(jh);
			jbd_unlock_bh_state(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/* The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list. */
			JBUFFER_TRACE(jh, "refile or unfile freed buffer");
			__jbd2_journal_refile_buffer(jh);
			if (!jh->b_transaction) {
				jbd_unlock_bh_state(bh);
				/* needs a brelse */
				jbd2_journal_remove_journal_head(bh);
				release_buffer_page(bh);
			} else
				jbd_unlock_bh_state(bh);
		}
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction().  Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	spin_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		spin_unlock(&journal->j_state_lock);
		goto restart_loop;
	}
	/* Done with this transaction! */

	jbd_debug(3, "JBD: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT);

	commit_transaction->t_start = jiffies;
	stats.u.run.rs_logging = jbd2_time_diff(stats.u.run.rs_logging,
						commit_transaction->t_start);

	/*
	 * File the transaction for history
	 */
	stats.ts_type = JBD2_STATS_RUN;
	stats.ts_tid = commit_transaction->t_tid;
	stats.u.run.rs_handle_count = commit_transaction->t_handle_count;
	spin_lock(&journal->j_history_lock);
	memcpy(journal->j_history + journal->j_history_cur, &stats,
	       sizeof(stats));
	if (++journal->j_history_cur == journal->j_history_max)
		journal->j_history_cur = 0;

	/*
	 * Calculate overall stats
	 */
	journal->j_stats.ts_tid++;
	journal->j_stats.u.run.rs_wait += stats.u.run.rs_wait;
	journal->j_stats.u.run.rs_running += stats.u.run.rs_running;
	journal->j_stats.u.run.rs_locked += stats.u.run.rs_locked;
	journal->j_stats.u.run.rs_flushing += stats.u.run.rs_flushing;
	journal->j_stats.u.run.rs_logging += stats.u.run.rs_logging;
	journal->j_stats.u.run.rs_handle_count += stats.u.run.rs_handle_count;
	journal->j_stats.u.run.rs_blocks += stats.u.run.rs_blocks;
	journal->j_stats.u.run.rs_blocks_logged += stats.u.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);
	commit_transaction->t_state = T_FINISHED;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * Weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in the commit time.
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;
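	/*
	 * Worked example (illustrative): with a previous average of 8 ms
	 * and a 20 ms commit, the new average is (20 + 3 * 8) / 4 = 11 ms,
	 * i.e. an exponential moving average giving weight 1/4 to the
	 * newest sample.
	 */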
	spin_unlock(&journal->j_state_lock);
	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		to_free = 1;
	} else {
		if (journal->j_checkpoint_transactions == NULL) {
			journal->j_checkpoint_transactions = commit_transaction;
			commit_transaction->t_cpnext = commit_transaction;
			commit_transaction->t_cpprev = commit_transaction;
		} else {
			commit_transaction->t_cpnext =
				journal->j_checkpoint_transactions;
			commit_transaction->t_cpprev =
				commit_transaction->t_cpnext->t_cpprev;
			commit_transaction->t_cpnext->t_cpprev =
				commit_transaction;
			commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
		}
	}
	spin_unlock(&journal->j_list_lock);
	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);

	trace_mark(jbd2_end_commit, "dev %s transaction %d head %d",
		   journal->j_devname, commit_transaction->t_tid,
		   journal->j_tail_sequence);
	jbd_debug(1, "JBD: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);
	if (to_free)
		kfree(commit_transaction);

	wake_up(&journal->j_wait_done_commit);
}