/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>
#include <asm/system.h>

/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
        BUFFER_TRACE(bh, "");
        if (uptodate)
                set_buffer_uptodate(bh);
        else
                clear_buffer_uptodate(bh);
        unlock_buffer(bh);
}
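
/*
 * Note that this handler only records completion status and unlocks the
 * buffer; the commit path below waits on each buffer synchronously with
 * wait_on_buffer(), so no further work is needed in IO completion context.
 */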

/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
        struct page *page;

        if (buffer_dirty(bh))
                goto nope;
        if (atomic_read(&bh->b_count) != 1)
                goto nope;
        page = bh->b_page;
        if (!page)
                goto nope;
        if (page->mapping)
                goto nope;

        /* OK, it's a truncated page */
        if (!trylock_page(page))
                goto nope;

        page_cache_get(page);
        __brelse(bh);
        try_to_free_buffers(page);
        unlock_page(page);
        page_cache_release(page);
        return;

nope:
        __brelse(bh);
}
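
/*
 * The page_cache_get()/page_cache_release() pair above keeps the page
 * pinned while its buffers are stripped: try_to_free_buffers() drops
 * the reference that the attached buffers held on the page, so we take
 * our own reference around the call.
 */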

/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
                                        transaction_t *commit_transaction,
                                        struct buffer_head **cbh,
                                        __u32 crc32_sum)
{
        struct journal_head *descriptor;
        struct commit_header *tmp;
        struct buffer_head *bh;
        int ret;
        struct timespec now = current_kernel_time();

        if (is_journal_aborted(journal))
                return 0;

        descriptor = jbd2_journal_get_descriptor_buffer(journal);
        if (!descriptor)
                return 1;

        bh = jh2bh(descriptor);

        tmp = (struct commit_header *)bh->b_data;
        tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
        tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
        tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
        tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
        tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

        if (JBD2_HAS_COMPAT_FEATURE(journal,
                                    JBD2_FEATURE_COMPAT_CHECKSUM)) {
                tmp->h_chksum_type      = JBD2_CRC32_CHKSUM;
                tmp->h_chksum_size      = JBD2_CRC32_CHKSUM_SIZE;
                tmp->h_chksum[0]        = cpu_to_be32(crc32_sum);
        }

        JBUFFER_TRACE(descriptor, "submit commit block");
        lock_buffer(bh);
        clear_buffer_dirty(bh);
        set_buffer_uptodate(bh);
        bh->b_end_io = journal_end_buffer_io_sync;

        if (journal->j_flags & JBD2_BARRIER &&
            !JBD2_HAS_INCOMPAT_FEATURE(journal,
                                       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
                ret = submit_bh(WRITE_SYNC_PLUG | WRITE_FLUSH_FUA, bh);
        else
                ret = submit_bh(WRITE_SYNC_PLUG, bh);

        *cbh = bh;
        return ret;
}
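
/*
 * On disk the commit block written above begins with a standard
 * journal_header_t (magic, blocktype, sequence) followed by the
 * checksum fields and the commit timestamp; see struct commit_header
 * in include/linux/jbd2.h:
 *
 *      __be32        h_magic;
 *      __be32        h_blocktype;
 *      __be32        h_sequence;
 *      unsigned char h_chksum_type;
 *      unsigned char h_chksum_size;
 *      unsigned char h_padding[2];
 *      __be32        h_chksum[JBD2_CHECKSUM_BYTES];
 *      __be64        h_commit_sec;
 *      __be32        h_commit_nsec;
 */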

/*
 * This function, along with journal_submit_commit_record(), allows the
 * commit record to be written asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
                                         struct buffer_head *bh)
{
        int ret = 0;

        clear_buffer_dirty(bh);
        wait_on_buffer(bh);

        if (unlikely(!buffer_uptodate(bh)))
                ret = -EIO;
        put_bh(bh);            /* One for getblk() */
        jbd2_journal_put_journal_head(bh2jh(bh));

        return ret;
}

/*
 * Write the filemap data using writepage() address_space_operations.
 * We don't do block allocation here, even for delalloc. We don't
 * use writepages() because with delayed allocation we may be doing
 * block allocation in writepages().
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping)
{
        int ret;
        struct writeback_control wbc = {
                .sync_mode =  WB_SYNC_ALL,
                .nr_to_write = mapping->nrpages * 2,
                .range_start = 0,
                .range_end = i_size_read(mapping->host),
        };

        ret = generic_writepages(mapping, &wbc);
        return ret;
}
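
/*
 * nr_to_write is given twice the number of pages currently in the
 * mapping, presumably as headroom so that the WB_SYNC_ALL pass is not
 * cut short if pages are redirtied or added while we are writing.
 */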

/*
 * Submit all the data buffers of inodes associated with the transaction to
 * disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use the JI_COMMIT_RUNNING flag to protect the inode we
 * are currently operating on from being released while we write out its
 * pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
                transaction_t *commit_transaction)
{
        struct jbd2_inode *jinode;
        int err, ret = 0;
        struct address_space *mapping;

        spin_lock(&journal->j_list_lock);
        list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
                mapping = jinode->i_vfs_inode->i_mapping;
                set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
                spin_unlock(&journal->j_list_lock);
                /*
                 * Submit the inode data buffers. We use writepage
                 * instead of writepages because writepages can do
                 * block allocation with delalloc; we need to write
                 * only allocated blocks here.
                 */
                trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
                err = journal_submit_inode_data_buffers(mapping);
                if (!ret)
                        ret = err;
                spin_lock(&journal->j_list_lock);
                J_ASSERT(jinode->i_transaction == commit_transaction);
                commit_transaction->t_flushed_data_blocks = 1;
                clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
                smp_mb__after_clear_bit();
                wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
        }
        spin_unlock(&journal->j_list_lock);
        return ret;
}
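
/*
 * Note the locking pattern above: j_list_lock is dropped around the
 * (potentially slow) writeout of each inode, and __JI_COMMIT_RUNNING
 * is what keeps the jbd2_inode alive in the meantime; code releasing
 * a journaled inode waits on that bit, which is why we issue a
 * wake_up_bit() after clearing it.
 */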

/*
 * Wait for data submitted for writeout, refile inodes to proper
 * transaction if needed.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
                transaction_t *commit_transaction)
{
        struct jbd2_inode *jinode, *next_i;
        int err, ret = 0;

        /* For locking, see the comment in journal_submit_data_buffers() */
        spin_lock(&journal->j_list_lock);
        list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
                set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
                spin_unlock(&journal->j_list_lock);
                err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
                if (err) {
                        /*
                         * Because AS_EIO is cleared by
                         * filemap_fdatawait_range(), set it again so
                         * that the user process can get -EIO from fsync().
                         */
                        set_bit(AS_EIO,
                                &jinode->i_vfs_inode->i_mapping->flags);

                        if (!ret)
                                ret = err;
                }
                spin_lock(&journal->j_list_lock);
                clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
                smp_mb__after_clear_bit();
                wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
        }

        /* Now refile inodes to proper lists */
        list_for_each_entry_safe(jinode, next_i,
                                 &commit_transaction->t_inode_list, i_list) {
                list_del(&jinode->i_list);
                if (jinode->i_next_transaction) {
                        jinode->i_transaction = jinode->i_next_transaction;
                        jinode->i_next_transaction = NULL;
                        list_add(&jinode->i_list,
                                &jinode->i_transaction->t_inode_list);
                } else {
                        jinode->i_transaction = NULL;
                }
        }
        spin_unlock(&journal->j_list_lock);

        return ret;
}

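/*
 * Fold one buffer into the running CRC32 of the transaction.  The
 * checksum is seeded with ~0 in jbd2_journal_commit_transaction() and
 * chained across every block written, so the value stored in the
 * commit header covers the whole transaction.
 */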
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
        struct page *page = bh->b_page;
        char *addr;
        __u32 checksum;

        addr = kmap_atomic(page, KM_USER0);
        checksum = crc32_be(crc32_sum,
                (void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
        kunmap_atomic(addr, KM_USER0);

        return checksum;
}

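/*
 * Record the on-disk location of a journaled buffer in a descriptor
 * tag.  The low 32 bits always go in t_blocknr; when the journal uses
 * 64-bit block numbers (tag_bytes > JBD2_TAG_SIZE32) the high bits go
 * in t_blocknr_high.  The shift is written as (block >> 31) >> 1
 * rather than (block >> 32), presumably so the expression remains
 * well-defined even if the type is ever only 32 bits wide.
 */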
static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
                                   unsigned long long block)
{
        tag->t_blocknr = cpu_to_be32(block & (u32)~0);
        if (tag_bytes > JBD2_TAG_SIZE32)
                tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}

/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
        struct transaction_stats_s stats;
        transaction_t *commit_transaction;
        struct journal_head *jh, *new_jh, *descriptor;
        struct buffer_head **wbuf = journal->j_wbuf;
        int bufs;
        int flags;
        int err;
        unsigned long long blocknr;
        ktime_t start_time;
        u64 commit_time;
        char *tagp = NULL;
        journal_header_t *header;
        journal_block_tag_t *tag = NULL;
        int space_left = 0;
        int first_tag = 0;
        int tag_flag;
        int i, to_free = 0;
        int tag_bytes = journal_tag_bytes(journal);
        struct buffer_head *cbh = NULL; /* For transactional checksums */
        __u32 crc32_sum = ~0;
        int write_op = WRITE_SYNC;

        /*
         * First job: lock down the current transaction and wait for
         * all outstanding updates to complete.
         */

#ifdef COMMIT_STATS
        spin_lock(&journal->j_list_lock);
        summarise_journal_usage(journal);
        spin_unlock(&journal->j_list_lock);
#endif

        /* Do we need to erase the effects of a prior jbd2_journal_flush? */
        if (journal->j_flags & JBD2_FLUSHED) {
                jbd_debug(3, "super block updated\n");
                jbd2_journal_update_superblock(journal, 1);
        } else {
                jbd_debug(3, "superblock not updated\n");
        }

        J_ASSERT(journal->j_running_transaction != NULL);
        J_ASSERT(journal->j_committing_transaction == NULL);

        commit_transaction = journal->j_running_transaction;
        J_ASSERT(commit_transaction->t_state == T_RUNNING);

        trace_jbd2_start_commit(journal, commit_transaction);
        jbd_debug(1, "JBD: starting commit of transaction %d\n",
                        commit_transaction->t_tid);

        write_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_LOCKED;

        /*
         * Use plugged writes here, since we want to submit several before
         * we unplug the device. We don't do explicit unplugging in here,
         * instead we rely on sync_buffer() doing the unplug for us.
         */
        if (commit_transaction->t_synchronous_commit)
                write_op = WRITE_SYNC_PLUG;
        trace_jbd2_commit_locking(journal, commit_transaction);
        stats.run.rs_wait = commit_transaction->t_max_wait;
        stats.run.rs_locked = jiffies;
        stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
                                              stats.run.rs_locked);

        spin_lock(&commit_transaction->t_handle_lock);
        while (atomic_read(&commit_transaction->t_updates)) {
                DEFINE_WAIT(wait);

                prepare_to_wait(&journal->j_wait_updates, &wait,
                                        TASK_UNINTERRUPTIBLE);
                if (atomic_read(&commit_transaction->t_updates)) {
                        spin_unlock(&commit_transaction->t_handle_lock);
                        write_unlock(&journal->j_state_lock);
                        schedule();
                        write_lock(&journal->j_state_lock);
                        spin_lock(&commit_transaction->t_handle_lock);
                }
                finish_wait(&journal->j_wait_updates, &wait);
        }
        spin_unlock(&commit_transaction->t_handle_lock);
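
        /*
         * The loop above is the standard prepare_to_wait() pattern:
         * register on j_wait_updates first, then recheck t_updates
         * before sleeping, so the wakeup from the last completing
         * handle cannot be lost between the test and the schedule().
         * Both locks must be dropped before schedule(), since we
         * cannot sleep while holding spinlocks.
         */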

        J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
                        journal->j_max_transaction_buffers);

        /*
         * First thing we are allowed to do is to discard any remaining
         * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
         * that there are no such buffers: if a large filesystem
         * operation like a truncate needs to split itself over multiple
         * transactions, then it may try to do a jbd2_journal_restart() while
         * there are still BJ_Reserved buffers outstanding.  These must
         * be released cleanly from the current transaction.
         *
         * In this case, the filesystem must still reserve write access
         * again before modifying the buffer in the new transaction, but
         * we do not require it to remember exactly which old buffers it
         * has reserved.  This is consistent with the existing behaviour
         * that multiple jbd2_journal_get_write_access() calls to the same
         * buffer are perfectly permissible.
         */
        while (commit_transaction->t_reserved_list) {
                jh = commit_transaction->t_reserved_list;
                JBUFFER_TRACE(jh, "reserved, unused: refile");
                /*
                 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
                 * leave undo-committed data.
                 */
                if (jh->b_committed_data) {
                        struct buffer_head *bh = jh2bh(jh);

                        jbd_lock_bh_state(bh);
                        jbd2_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        jbd_unlock_bh_state(bh);
                }
                jbd2_journal_refile_buffer(journal, jh);
        }

        /*
         * Now try to drop any written-back buffers from the journal's
         * checkpoint lists.  We do this *before* commit because it potentially
         * frees some memory
         */
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_clean_checkpoint_list(journal);
        spin_unlock(&journal->j_list_lock);

        jbd_debug (3, "JBD: commit phase 1\n");

        /*
         * Switch to a new revoke table.
         */
        jbd2_journal_switch_revoke_table(journal);

        trace_jbd2_commit_flushing(journal, commit_transaction);
        stats.run.rs_flushing = jiffies;
        stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
                                             stats.run.rs_flushing);

        commit_transaction->t_state = T_FLUSH;
        journal->j_committing_transaction = commit_transaction;
        journal->j_running_transaction = NULL;
        start_time = ktime_get();
        commit_transaction->t_log_start = journal->j_head;
        wake_up(&journal->j_wait_transaction_locked);
        write_unlock(&journal->j_state_lock);

        jbd_debug (3, "JBD: commit phase 2\n");

        /*
         * Now start flushing things to disk, in the order they appear
         * on the transaction lists.  Data blocks go first.
         */
        err = journal_submit_data_buffers(journal, commit_transaction);
        if (err)
                jbd2_journal_abort(journal, err);

        jbd2_journal_write_revoke_records(journal, commit_transaction,
                                          write_op);

        jbd_debug(3, "JBD: commit phase 2\n");

        /*
         * Way to go: we have now written out all of the data for a
         * transaction!  Now comes the tricky part: we need to write out
         * metadata.  Loop over the transaction's entire buffer list:
         */
        write_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_COMMIT;
        write_unlock(&journal->j_state_lock);

        trace_jbd2_commit_logging(journal, commit_transaction);
        stats.run.rs_logging = jiffies;
        stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
                                               stats.run.rs_logging);
        stats.run.rs_blocks =
                atomic_read(&commit_transaction->t_outstanding_credits);
        stats.run.rs_blocks_logged = 0;

        J_ASSERT(commit_transaction->t_nr_buffers <=
                 atomic_read(&commit_transaction->t_outstanding_credits));

        err = 0;
        descriptor = NULL;
        bufs = 0;
        while (commit_transaction->t_buffers) {

                /* Find the next buffer to be journaled... */

                jh = commit_transaction->t_buffers;

                /* If we're in abort mode, we just un-journal the buffer and
                   release it. */

                if (is_journal_aborted(journal)) {
                        clear_buffer_jbddirty(jh2bh(jh));
                        JBUFFER_TRACE(jh, "journal is aborting: refile");
                        jbd2_buffer_abort_trigger(jh,
                                                  jh->b_frozen_data ?
                                                  jh->b_frozen_triggers :
                                                  jh->b_triggers);
                        jbd2_journal_refile_buffer(journal, jh);
                        /* If that was the last one, we need to clean up
                         * any descriptor buffers which may have been
                         * already allocated, even if we are now
                         * aborting. */
                        if (!commit_transaction->t_buffers)
                                goto start_journal_io;
                        continue;
                }

                /* Make sure we have a descriptor block in which to
                   record the metadata buffer. */

                if (!descriptor) {
                        struct buffer_head *bh;

                        J_ASSERT (bufs == 0);

                        jbd_debug(4, "JBD: get descriptor\n");

                        descriptor = jbd2_journal_get_descriptor_buffer(journal);
                        if (!descriptor) {
                                jbd2_journal_abort(journal, -EIO);
                                continue;
                        }

                        bh = jh2bh(descriptor);
                        jbd_debug(4, "JBD: got buffer %llu (%p)\n",
                                (unsigned long long)bh->b_blocknr, bh->b_data);
                        header = (journal_header_t *)&bh->b_data[0];
                        header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
                        header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
                        header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

                        tagp = &bh->b_data[sizeof(journal_header_t)];
                        space_left = bh->b_size - sizeof(journal_header_t);
                        first_tag = 1;
                        set_buffer_jwrite(bh);
                        set_buffer_dirty(bh);
                        wbuf[bufs++] = bh;

                        /* Record it so that we can wait for IO
                           completion later */
                        BUFFER_TRACE(bh, "ph3: file as descriptor");
                        jbd2_journal_file_buffer(descriptor, commit_transaction,
                                        BJ_LogCtl);
                }

                /* Where is the buffer to be written? */

                err = jbd2_journal_next_log_block(journal, &blocknr);
                /* If the block mapping failed, just abandon the buffer
                   and repeat this loop: we'll fall into the
                   refile-on-abort condition above. */
                if (err) {
                        jbd2_journal_abort(journal, err);
                        continue;
                }

                /*
                 * start_this_handle() uses t_outstanding_credits to determine
                 * the free space in the log, but this counter is changed
                 * by jbd2_journal_next_log_block() also.
                 */
                atomic_dec(&commit_transaction->t_outstanding_credits);

                /* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
                atomic_inc(&jh2bh(jh)->b_count);

                /* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer). new_bh goes on BJ_IO */

                set_bit(BH_JWrite, &jh2bh(jh)->b_state);
                /*
                 * akpm: jbd2_journal_write_metadata_buffer() sets
                 * new_bh->b_transaction to commit_transaction.
                 * We need to clean this up before we release new_bh
                 * (which is of type BJ_IO)
                 */
                JBUFFER_TRACE(jh, "ph3: write metadata");
                flags = jbd2_journal_write_metadata_buffer(commit_transaction,
                                                      jh, &new_jh, blocknr);
                if (flags < 0) {
                        jbd2_journal_abort(journal, flags);
                        continue;
                }
                set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
                wbuf[bufs++] = jh2bh(new_jh);

                /* Record the new block's tag in the current descriptor
                   buffer */

                tag_flag = 0;
                if (flags & 1)
                        tag_flag |= JBD2_FLAG_ESCAPE;
                if (!first_tag)
                        tag_flag |= JBD2_FLAG_SAME_UUID;

                tag = (journal_block_tag_t *) tagp;
                write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
                tag->t_flags = cpu_to_be32(tag_flag);
                tagp += tag_bytes;
                space_left -= tag_bytes;

                if (first_tag) {
                        memcpy (tagp, journal->j_uuid, 16);
                        tagp += 16;
                        space_left -= 16;
                        first_tag = 0;
                }
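
                /*
                 * The descriptor block therefore ends up laid out as:
                 *
                 *   journal_header_t | tag0 | uuid[16] | tag1 | tag2 | ...
                 *
                 * with the journal UUID following only the first tag and
                 * JBD2_FLAG_SAME_UUID set on every subsequent tag.
                 */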

                /* If there's no more to do, or if the descriptor is full,
                   let the IO rip! */

                if (bufs == journal->j_wbufsize ||
                    commit_transaction->t_buffers == NULL ||
                    space_left < tag_bytes + 16) {

                        jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

                        /* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

                        tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);

start_journal_io:
                        for (i = 0; i < bufs; i++) {
                                struct buffer_head *bh = wbuf[i];
                                /*
                                 * Compute checksum.
                                 */
                                if (JBD2_HAS_COMPAT_FEATURE(journal,
                                        JBD2_FEATURE_COMPAT_CHECKSUM)) {
                                        crc32_sum =
                                            jbd2_checksum_data(crc32_sum, bh);
                                }

                                lock_buffer(bh);
                                clear_buffer_dirty(bh);
                                set_buffer_uptodate(bh);
                                bh->b_end_io = journal_end_buffer_io_sync;
                                submit_bh(write_op, bh);
                        }
                        cond_resched();
                        stats.run.rs_blocks_logged += bufs;

                        /* Force a new descriptor to be generated next
                           time round the loop. */
                        descriptor = NULL;
                        bufs = 0;
                }
        }

        err = journal_finish_inode_data_buffers(journal, commit_transaction);
        if (err) {
                printk(KERN_WARNING
                        "JBD2: Detected IO errors while flushing file data "
                       "on %s\n", journal->j_devname);
                if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
                        jbd2_journal_abort(journal, err);
                err = 0;
        }

        /*
         * If the journal is not located on the file system device,
         * then we must flush the file system device before we issue
         * the commit record
         */
        if (commit_transaction->t_flushed_data_blocks &&
            (journal->j_fs_dev != journal->j_dev) &&
            (journal->j_flags & JBD2_BARRIER))
                blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);

        /* Done it all: now write the commit record asynchronously. */
        if (JBD2_HAS_INCOMPAT_FEATURE(journal,
                                      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
                err = journal_submit_commit_record(journal, commit_transaction,
                                                 &cbh, crc32_sum);
                if (err)
                        __jbd2_journal_abort_hard(journal);
        }

        /* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

           Wait for the buffers in reverse order.  That way we are
           less likely to be woken up until all IOs have completed, and
           so we incur less scheduling load.
        */

        jbd_debug(3, "JBD: commit phase 3\n");

        /*
         * akpm: these are BJ_IO, and j_list_lock is not needed.
         * See __journal_try_to_free_buffer.
         */
wait_for_iobuf:
        while (commit_transaction->t_iobuf_list != NULL) {
                struct buffer_head *bh;

                jh = commit_transaction->t_iobuf_list->b_tprev;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        wait_on_buffer(bh);
                        goto wait_for_iobuf;
                }
                if (cond_resched())
                        goto wait_for_iobuf;

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                clear_buffer_jwrite(bh);

                JBUFFER_TRACE(jh, "ph4: unfile after journal write");
                jbd2_journal_unfile_buffer(journal, jh);

                /*
                 * ->t_iobuf_list should contain only dummy buffer_heads
                 * which were created by jbd2_journal_write_metadata_buffer().
                 */
                BUFFER_TRACE(bh, "dumping temporary bh");
                jbd2_journal_put_journal_head(jh);
                __brelse(bh);
                J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
                free_buffer_head(bh);

                /* We also have to unlock and free the corresponding
                   shadowed buffer */
                jh = commit_transaction->t_shadow_list->b_tprev;
                bh = jh2bh(jh);
                clear_bit(BH_JWrite, &bh->b_state);
                J_ASSERT_BH(bh, buffer_jbddirty(bh));

                /* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
                JBUFFER_TRACE(jh, "file as BJ_Forget");
                jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
                /* Wake up any transactions which were waiting for this
                   IO to complete */
                wake_up_bit(&bh->b_state, BH_Unshadow);
                JBUFFER_TRACE(jh, "brelse shadowed buffer");
                __brelse(bh);
        }
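
        /*
         * Each temporary IO buffer freed above was paired with a shadowed
         * metadata buffer on BJ_Shadow; now that the copy is on disk, the
         * original is refiled to BJ_Forget and the BH_Unshadow wakeup lets
         * anyone waiting to modify the buffer (see do_get_write_access())
         * carry on.
         */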

        J_ASSERT (commit_transaction->t_shadow_list == NULL);

        jbd_debug(3, "JBD: commit phase 4\n");

        /* Here we wait for the revoke record and descriptor record buffers */
 wait_for_ctlbuf:
        while (commit_transaction->t_log_list != NULL) {
                struct buffer_head *bh;

                jh = commit_transaction->t_log_list->b_tprev;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        wait_on_buffer(bh);
                        goto wait_for_ctlbuf;
                }
                if (cond_resched())
                        goto wait_for_ctlbuf;

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
                clear_buffer_jwrite(bh);
                jbd2_journal_unfile_buffer(journal, jh);
                jbd2_journal_put_journal_head(jh);
                __brelse(bh);           /* One for getblk */
                /* AKPM: bforget here */
        }

        if (err)
                jbd2_journal_abort(journal, err);

        jbd_debug(3, "JBD: commit phase 5\n");

        if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
                                       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
                err = journal_submit_commit_record(journal, commit_transaction,
                                                &cbh, crc32_sum);
                if (err)
                        __jbd2_journal_abort_hard(journal);
        }
        if (!err && !is_journal_aborted(journal))
                err = journal_wait_on_commit_record(journal, cbh);
        if (JBD2_HAS_INCOMPAT_FEATURE(journal,
                                      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
            journal->j_flags & JBD2_BARRIER) {
                blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL);
        }

        if (err)
                jbd2_journal_abort(journal, err);

        /* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

        jbd_debug(3, "JBD: commit phase 6\n");

        J_ASSERT(list_empty(&commit_transaction->t_inode_list));
        J_ASSERT(commit_transaction->t_buffers == NULL);
        J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
        J_ASSERT(commit_transaction->t_iobuf_list == NULL);
        J_ASSERT(commit_transaction->t_shadow_list == NULL);
        J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
        /*
         * As there are other places (journal_unmap_buffer()) adding buffers
         * to this list we have to be careful and hold the j_list_lock.
         */
        spin_lock(&journal->j_list_lock);
        while (commit_transaction->t_forget) {
                transaction_t *cp_transaction;
                struct buffer_head *bh;

                jh = commit_transaction->t_forget;
                spin_unlock(&journal->j_list_lock);
                bh = jh2bh(jh);
                jbd_lock_bh_state(bh);
                J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);

                /*
                 * If there is undo-protected committed data against
                 * this buffer, then we can remove it now.  If it is a
                 * buffer needing such protection, the old frozen_data
                 * field now points to a committed version of the
                 * buffer, so rotate that field to the new committed
                 * data.
                 *
                 * Otherwise, we can just throw away the frozen data now.
                 *
                 * We also know that the frozen data has already fired
                 * its triggers if they exist, so we can clear that too.
                 */
                if (jh->b_committed_data) {
                        jbd2_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        if (jh->b_frozen_data) {
                                jh->b_committed_data = jh->b_frozen_data;
                                jh->b_frozen_data = NULL;
                                jh->b_frozen_triggers = NULL;
                        }
                } else if (jh->b_frozen_data) {
                        jbd2_free(jh->b_frozen_data, bh->b_size);
                        jh->b_frozen_data = NULL;
                        jh->b_frozen_triggers = NULL;
                }

                spin_lock(&journal->j_list_lock);
                cp_transaction = jh->b_cp_transaction;
                if (cp_transaction) {
                        JBUFFER_TRACE(jh, "remove from old cp transaction");
                        cp_transaction->t_chp_stats.cs_dropped++;
                        __jbd2_journal_remove_checkpoint(jh);
                }

                /* Only re-checkpoint the buffer_head if it is marked
                 * dirty.  If the buffer was added to the BJ_Forget list
                 * by jbd2_journal_forget, it may no longer be dirty and
                 * there's no point in keeping a checkpoint record for
                 * it. */

                /* A buffer which has been freed while still being
                 * journaled by a previous transaction may end up still
                 * being dirty here, but we want to avoid writing back
                 * that buffer in the future after the "add to orphan"
                 * operation has been committed.  That's not only a
                 * performance gain, it also stops aliasing problems if
                 * the buffer is left behind for writeback and gets
                 * reallocated for another use in a different page. */
                if (buffer_freed(bh) && !jh->b_next_transaction) {
                        clear_buffer_freed(bh);
                        clear_buffer_jbddirty(bh);
                }

                if (buffer_jbddirty(bh)) {
                        JBUFFER_TRACE(jh, "add to new checkpointing trans");
                        __jbd2_journal_insert_checkpoint(jh, commit_transaction);
                        if (is_journal_aborted(journal))
                                clear_buffer_jbddirty(bh);
                        JBUFFER_TRACE(jh, "refile for checkpoint writeback");
                        __jbd2_journal_refile_buffer(jh);
                        jbd_unlock_bh_state(bh);
                } else {
                        J_ASSERT_BH(bh, !buffer_dirty(bh));
                        /* The buffer on BJ_Forget list and not jbddirty means
                         * it has been freed by this transaction and hence it
                         * could not have been reallocated until this
                         * transaction has committed. *BUT* it could be
                         * reallocated once we have written all the data to
                         * disk and before we process the buffer on BJ_Forget
                         * list. */
                        JBUFFER_TRACE(jh, "refile or unfile freed buffer");
                        __jbd2_journal_refile_buffer(jh);
                        if (!jh->b_transaction) {
                                jbd_unlock_bh_state(bh);
                                /* needs a brelse */
                                jbd2_journal_remove_journal_head(bh);
                                release_buffer_page(bh);
                        } else
                                jbd_unlock_bh_state(bh);
                }
                cond_resched_lock(&journal->j_list_lock);
        }
        spin_unlock(&journal->j_list_lock);
        /*
         * This is a bit sleazy.  We use j_list_lock to protect transition
         * of a transaction into T_FINISHED state and calling
         * __jbd2_journal_drop_transaction(). Otherwise we could race with
         * other checkpointing code processing the transaction...
         */
        write_lock(&journal->j_state_lock);
        spin_lock(&journal->j_list_lock);
        /*
         * Now recheck if some buffers did not get attached to the transaction
         * while the lock was dropped...
         */
        if (commit_transaction->t_forget) {
                spin_unlock(&journal->j_list_lock);
                write_unlock(&journal->j_state_lock);
                goto restart_loop;
        }

        /* Done with this transaction! */

        jbd_debug(3, "JBD: commit phase 7\n");

        J_ASSERT(commit_transaction->t_state == T_COMMIT);

        commit_transaction->t_start = jiffies;
        stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
                                              commit_transaction->t_start);

        /*
         * File the transaction statistics
         */
        stats.ts_tid = commit_transaction->t_tid;
        stats.run.rs_handle_count =
                atomic_read(&commit_transaction->t_handle_count);
        trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
                             commit_transaction->t_tid, &stats.run);

        /*
         * Calculate overall stats
         */
        spin_lock(&journal->j_history_lock);
        journal->j_stats.ts_tid++;
        journal->j_stats.run.rs_wait += stats.run.rs_wait;
        journal->j_stats.run.rs_running += stats.run.rs_running;
        journal->j_stats.run.rs_locked += stats.run.rs_locked;
        journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
        journal->j_stats.run.rs_logging += stats.run.rs_logging;
        journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
        journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
        journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
        spin_unlock(&journal->j_history_lock);

        commit_transaction->t_state = T_FINISHED;
        J_ASSERT(commit_transaction == journal->j_committing_transaction);
        journal->j_commit_sequence = commit_transaction->t_tid;
        journal->j_committing_transaction = NULL;
        commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

        /*
         * weight the commit time higher than the average time so we don't
         * react too strongly to vast changes in the commit time
         */
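        /* i.e. an exponentially weighted moving average: avg = (t + 3*avg) / 4 */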
        if (likely(journal->j_average_commit_time))
                journal->j_average_commit_time = (commit_time +
                                journal->j_average_commit_time*3) / 4;
        else
                journal->j_average_commit_time = commit_time;
        write_unlock(&journal->j_state_lock);

        if (commit_transaction->t_checkpoint_list == NULL &&
            commit_transaction->t_checkpoint_io_list == NULL) {
                __jbd2_journal_drop_transaction(journal, commit_transaction);
                to_free = 1;
        } else {
                if (journal->j_checkpoint_transactions == NULL) {
                        journal->j_checkpoint_transactions = commit_transaction;
                        commit_transaction->t_cpnext = commit_transaction;
                        commit_transaction->t_cpprev = commit_transaction;
                } else {
                        commit_transaction->t_cpnext =
                                journal->j_checkpoint_transactions;
                        commit_transaction->t_cpprev =
                                commit_transaction->t_cpnext->t_cpprev;
                        commit_transaction->t_cpnext->t_cpprev =
                                commit_transaction;
                        commit_transaction->t_cpprev->t_cpnext =
                                commit_transaction;
                }
        }
        spin_unlock(&journal->j_list_lock);

        if (journal->j_commit_callback)
                journal->j_commit_callback(journal, commit_transaction);

        trace_jbd2_end_commit(journal, commit_transaction);
        jbd_debug(1, "JBD: commit %d complete, head %d\n",
                  journal->j_commit_sequence, journal->j_tail_sequence);
        if (to_free)
                kfree(commit_transaction);

        wake_up(&journal->j_wait_done_commit);
}