2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "xfs_format.h"
21 #include "xfs_shared.h"
25 #include "xfs_trans.h"
28 #include "xfs_mount.h"
29 #include "xfs_error.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_btree.h"
34 #include "xfs_dinode.h"
35 #include "xfs_inode.h"
36 #include "xfs_inode_item.h"
37 #include "xfs_alloc.h"
38 #include "xfs_ialloc.h"
39 #include "xfs_log_priv.h"
40 #include "xfs_buf_item.h"
41 #include "xfs_log_recover.h"
42 #include "xfs_extfree_item.h"
43 #include "xfs_trans_priv.h"
44 #include "xfs_quota.h"
45 #include "xfs_cksum.h"
46 #include "xfs_trace.h"
47 #include "xfs_icache.h"
48 #include "xfs_icreate_item.h"
50 /* Need all the magic numbers and buffer ops structures from these headers */
51 #include "xfs_symlink.h"
52 #include "xfs_da_btree.h"
53 #include "xfs_dir2_format.h"
55 #include "xfs_attr_leaf.h"
56 #include "xfs_attr_remote.h"
58 #define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1)
65 xlog_clear_stale_blocks(
70 xlog_recover_check_summary(
73 #define xlog_recover_check_summary(log)
77 * This structure is used during recovery to record the buf log items which
78 * have been canceled and should not be replayed.
80 struct xfs_buf_cancel {
84 struct list_head bc_list;
88 * Sector aligned buffer routines for buffer create/read/write/access
92 * Verify the given count of basic blocks is valid number of blocks
93 * to specify for an operation involving the given XFS log buffer.
94 * Returns nonzero if the count is valid, 0 otherwise.
98 xlog_buf_bbcount_valid(
102 return bbcount > 0 && bbcount <= log->l_logBBsize;
106 * Allocate a buffer to hold log data. The buffer needs to be able
107 * to map to a range of nbblks basic blocks at any valid (basic
108 * block) offset within the log.
117 if (!xlog_buf_bbcount_valid(log, nbblks)) {
118 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
120 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
125 * We do log I/O in units of log sectors (a power-of-2
126 * multiple of the basic block size), so we round up the
127 * requested size to accommodate the basic blocks required
128 * for complete log sectors.
130 * In addition, the buffer may be used for a non-sector-
131 * aligned block offset, in which case an I/O of the
132 * requested size could extend beyond the end of the
133 * buffer. If the requested size is only 1 basic block it
134 * will never straddle a sector boundary, so this won't be
135 * an issue. Nor will this be a problem if the log I/O is
136 * done in basic blocks (sector size 1). But otherwise we
137 * extend the buffer by one extra log sector to ensure
138 * there's space to accommodate this possibility.
140 if (nbblks > 1 && log->l_sectBBsize > 1)
141 nbblks += log->l_sectBBsize;
142 nbblks = round_up(nbblks, log->l_sectBBsize);
144 bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
158 * Return the address of the start of the given block number's data
159 * in a log buffer. The buffer covers a log sector-aligned region.
168 xfs_daddr_t offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
170 ASSERT(offset + nbblks <= bp->b_length);
171 return bp->b_addr + BBTOB(offset);
176 * nbblks should be uint, but oh well. Just want to catch that 32-bit length.
187 if (!xlog_buf_bbcount_valid(log, nbblks)) {
188 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
190 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
194 blk_no = round_down(blk_no, log->l_sectBBsize);
195 nbblks = round_up(nbblks, log->l_sectBBsize);
198 ASSERT(nbblks <= bp->b_length);
200 XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
202 bp->b_io_length = nbblks;
205 xfsbdstrat(log->l_mp, bp);
206 error = xfs_buf_iowait(bp);
208 xfs_buf_ioerror_alert(bp, __func__);
222 error = xlog_bread_noalign(log, blk_no, nbblks, bp);
226 *offset = xlog_align(log, blk_no, nbblks, bp);
231 * Read at an offset into the buffer. Returns with the buffer in it's original
232 * state regardless of the result of the read.
237 xfs_daddr_t blk_no, /* block to read from */
238 int nbblks, /* blocks to read */
242 xfs_caddr_t orig_offset = bp->b_addr;
243 int orig_len = BBTOB(bp->b_length);
246 error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
250 error = xlog_bread_noalign(log, blk_no, nbblks, bp);
252 /* must reset buffer pointer even on error */
253 error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
260 * Write out the buffer at the given block for the given number of blocks.
261 * The buffer is kept locked across the write and is returned locked.
262 * This can only be used for synchronous log writes.
273 if (!xlog_buf_bbcount_valid(log, nbblks)) {
274 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
276 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
280 blk_no = round_down(blk_no, log->l_sectBBsize);
281 nbblks = round_up(nbblks, log->l_sectBBsize);
284 ASSERT(nbblks <= bp->b_length);
286 XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
287 XFS_BUF_ZEROFLAGS(bp);
290 bp->b_io_length = nbblks;
293 error = xfs_bwrite(bp);
295 xfs_buf_ioerror_alert(bp, __func__);
302 * dump debug superblock and log record information
305 xlog_header_check_dump(
307 xlog_rec_header_t *head)
309 xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d",
310 __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
311 xfs_debug(mp, " log : uuid = %pU, fmt = %d",
312 &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
315 #define xlog_header_check_dump(mp, head)
319 * check log record header for recovery
322 xlog_header_check_recover(
324 xlog_rec_header_t *head)
326 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
329 * IRIX doesn't write the h_fmt field and leaves it zeroed
330 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
331 * a dirty log created in IRIX.
333 if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
335 "dirty log written in incompatible format - can't recover");
336 xlog_header_check_dump(mp, head);
337 XFS_ERROR_REPORT("xlog_header_check_recover(1)",
338 XFS_ERRLEVEL_HIGH, mp);
339 return XFS_ERROR(EFSCORRUPTED);
340 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
342 "dirty log entry has mismatched uuid - can't recover");
343 xlog_header_check_dump(mp, head);
344 XFS_ERROR_REPORT("xlog_header_check_recover(2)",
345 XFS_ERRLEVEL_HIGH, mp);
346 return XFS_ERROR(EFSCORRUPTED);
352 * read the head block of the log and check the header
355 xlog_header_check_mount(
357 xlog_rec_header_t *head)
359 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
361 if (uuid_is_nil(&head->h_fs_uuid)) {
363 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
364 * h_fs_uuid is nil, we assume this log was last mounted
365 * by IRIX and continue.
367 xfs_warn(mp, "nil uuid in log - IRIX style log");
368 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
369 xfs_warn(mp, "log has mismatched uuid - can't recover");
370 xlog_header_check_dump(mp, head);
371 XFS_ERROR_REPORT("xlog_header_check_mount",
372 XFS_ERRLEVEL_HIGH, mp);
373 return XFS_ERROR(EFSCORRUPTED);
384 * We're not going to bother about retrying
385 * this during recovery. One strike!
387 xfs_buf_ioerror_alert(bp, __func__);
388 xfs_force_shutdown(bp->b_target->bt_mount,
389 SHUTDOWN_META_IO_ERROR);
392 xfs_buf_ioend(bp, 0);
396 * This routine finds (to an approximation) the first block in the physical
397 * log which contains the given cycle. It uses a binary search algorithm.
398 * Note that the algorithm can not be perfect because the disk will not
399 * necessarily be perfect.
402 xlog_find_cycle_start(
405 xfs_daddr_t first_blk,
406 xfs_daddr_t *last_blk,
416 mid_blk = BLK_AVG(first_blk, end_blk);
417 while (mid_blk != first_blk && mid_blk != end_blk) {
418 error = xlog_bread(log, mid_blk, 1, bp, &offset);
421 mid_cycle = xlog_get_cycle(offset);
422 if (mid_cycle == cycle)
423 end_blk = mid_blk; /* last_half_cycle == mid_cycle */
425 first_blk = mid_blk; /* first_half_cycle == mid_cycle */
426 mid_blk = BLK_AVG(first_blk, end_blk);
428 ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
429 (mid_blk == end_blk && mid_blk-1 == first_blk));
437 * Check that a range of blocks does not contain stop_on_cycle_no.
438 * Fill in *new_blk with the block offset where such a block is
439 * found, or with -1 (an invalid block number) if there is no such
440 * block in the range. The scan needs to occur from front to back
441 * and the pointer into the region must be updated since a later
442 * routine will need to perform another test.
445 xlog_find_verify_cycle(
447 xfs_daddr_t start_blk,
449 uint stop_on_cycle_no,
450 xfs_daddr_t *new_blk)
456 xfs_caddr_t buf = NULL;
460 * Greedily allocate a buffer big enough to handle the full
461 * range of basic blocks we'll be examining. If that fails,
462 * try a smaller size. We need to be able to read at least
463 * a log sector, or we're out of luck.
465 bufblks = 1 << ffs(nbblks);
466 while (bufblks > log->l_logBBsize)
468 while (!(bp = xlog_get_bp(log, bufblks))) {
470 if (bufblks < log->l_sectBBsize)
474 for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
477 bcount = min(bufblks, (start_blk + nbblks - i));
479 error = xlog_bread(log, i, bcount, bp, &buf);
483 for (j = 0; j < bcount; j++) {
484 cycle = xlog_get_cycle(buf);
485 if (cycle == stop_on_cycle_no) {
502 * Potentially backup over partial log record write.
504 * In the typical case, last_blk is the number of the block directly after
505 * a good log record. Therefore, we subtract one to get the block number
506 * of the last block in the given buffer. extra_bblks contains the number
507 * of blocks we would have read on a previous read. This happens when the
508 * last log record is split over the end of the physical log.
510 * extra_bblks is the number of blocks potentially verified on a previous
511 * call to this routine.
514 xlog_find_verify_log_record(
516 xfs_daddr_t start_blk,
517 xfs_daddr_t *last_blk,
522 xfs_caddr_t offset = NULL;
523 xlog_rec_header_t *head = NULL;
526 int num_blks = *last_blk - start_blk;
529 ASSERT(start_blk != 0 || *last_blk != start_blk);
531 if (!(bp = xlog_get_bp(log, num_blks))) {
532 if (!(bp = xlog_get_bp(log, 1)))
536 error = xlog_bread(log, start_blk, num_blks, bp, &offset);
539 offset += ((num_blks - 1) << BBSHIFT);
542 for (i = (*last_blk) - 1; i >= 0; i--) {
544 /* valid log record not found */
546 "Log inconsistent (didn't find previous header)");
548 error = XFS_ERROR(EIO);
553 error = xlog_bread(log, i, 1, bp, &offset);
558 head = (xlog_rec_header_t *)offset;
560 if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
568 * We hit the beginning of the physical log & still no header. Return
569 * to caller. If caller can handle a return of -1, then this routine
570 * will be called again for the end of the physical log.
578 * We have the final block of the good log (the first block
579 * of the log record _before_ the head. So we check the uuid.
581 if ((error = xlog_header_check_mount(log->l_mp, head)))
585 * We may have found a log record header before we expected one.
586 * last_blk will be the 1st block # with a given cycle #. We may end
587 * up reading an entire log record. In this case, we don't want to
588 * reset last_blk. Only when last_blk points in the middle of a log
589 * record do we update last_blk.
591 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
592 uint h_size = be32_to_cpu(head->h_size);
594 xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
595 if (h_size % XLOG_HEADER_CYCLE_SIZE)
601 if (*last_blk - i + extra_bblks !=
602 BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
611 * Head is defined to be the point of the log where the next log write
612 * could go. This means that incomplete LR writes at the end are
613 * eliminated when calculating the head. We aren't guaranteed that previous
614 * LR have complete transactions. We only know that a cycle number of
615 * current cycle number -1 won't be present in the log if we start writing
616 * from our current block number.
618 * last_blk contains the block number of the first block with a given
621 * Return: zero if normal, non-zero if error.
626 xfs_daddr_t *return_head_blk)
630 xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk;
632 uint first_half_cycle, last_half_cycle;
634 int error, log_bbnum = log->l_logBBsize;
636 /* Is the end of the log device zeroed? */
637 if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
638 *return_head_blk = first_blk;
640 /* Is the whole lot zeroed? */
642 /* Linux XFS shouldn't generate totally zeroed logs -
643 * mkfs etc write a dummy unmount record to a fresh
644 * log so we can store the uuid in there
646 xfs_warn(log->l_mp, "totally zeroed log");
651 xfs_warn(log->l_mp, "empty log check failed");
655 first_blk = 0; /* get cycle # of 1st block */
656 bp = xlog_get_bp(log, 1);
660 error = xlog_bread(log, 0, 1, bp, &offset);
664 first_half_cycle = xlog_get_cycle(offset);
666 last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */
667 error = xlog_bread(log, last_blk, 1, bp, &offset);
671 last_half_cycle = xlog_get_cycle(offset);
672 ASSERT(last_half_cycle != 0);
675 * If the 1st half cycle number is equal to the last half cycle number,
676 * then the entire log is stamped with the same cycle number. In this
677 * case, head_blk can't be set to zero (which makes sense). The below
678 * math doesn't work out properly with head_blk equal to zero. Instead,
679 * we set it to log_bbnum which is an invalid block number, but this
680 * value makes the math correct. If head_blk doesn't changed through
681 * all the tests below, *head_blk is set to zero at the very end rather
682 * than log_bbnum. In a sense, log_bbnum and zero are the same block
683 * in a circular file.
685 if (first_half_cycle == last_half_cycle) {
687 * In this case we believe that the entire log should have
688 * cycle number last_half_cycle. We need to scan backwards
689 * from the end verifying that there are no holes still
690 * containing last_half_cycle - 1. If we find such a hole,
691 * then the start of that hole will be the new head. The
692 * simple case looks like
693 * x | x ... | x - 1 | x
694 * Another case that fits this picture would be
695 * x | x + 1 | x ... | x
696 * In this case the head really is somewhere at the end of the
697 * log, as one of the latest writes at the beginning was
700 * x | x + 1 | x ... | x - 1 | x
701 * This is really the combination of the above two cases, and
702 * the head has to end up at the start of the x-1 hole at the
705 * In the 256k log case, we will read from the beginning to the
706 * end of the log and search for cycle numbers equal to x-1.
707 * We don't worry about the x+1 blocks that we encounter,
708 * because we know that they cannot be the head since the log
711 head_blk = log_bbnum;
712 stop_on_cycle = last_half_cycle - 1;
715 * In this case we want to find the first block with cycle
716 * number matching last_half_cycle. We expect the log to be
718 * x + 1 ... | x ... | x
719 * The first block with cycle number x (last_half_cycle) will
720 * be where the new head belongs. First we do a binary search
721 * for the first occurrence of last_half_cycle. The binary
722 * search may not be totally accurate, so then we scan back
723 * from there looking for occurrences of last_half_cycle before
724 * us. If that backwards scan wraps around the beginning of
725 * the log, then we look for occurrences of last_half_cycle - 1
726 * at the end of the log. The cases we're looking for look
728 * v binary search stopped here
729 * x + 1 ... | x | x + 1 | x ... | x
730 * ^ but we want to locate this spot
732 * <---------> less than scan distance
733 * x + 1 ... | x ... | x - 1 | x
734 * ^ we want to locate this spot
736 stop_on_cycle = last_half_cycle;
737 if ((error = xlog_find_cycle_start(log, bp, first_blk,
738 &head_blk, last_half_cycle)))
743 * Now validate the answer. Scan back some number of maximum possible
744 * blocks and make sure each one has the expected cycle number. The
745 * maximum is determined by the total possible amount of buffering
746 * in the in-core log. The following number can be made tighter if
747 * we actually look at the block size of the filesystem.
749 num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
750 if (head_blk >= num_scan_bblks) {
752 * We are guaranteed that the entire check can be performed
755 start_blk = head_blk - num_scan_bblks;
756 if ((error = xlog_find_verify_cycle(log,
757 start_blk, num_scan_bblks,
758 stop_on_cycle, &new_blk)))
762 } else { /* need to read 2 parts of log */
764 * We are going to scan backwards in the log in two parts.
765 * First we scan the physical end of the log. In this part
766 * of the log, we are looking for blocks with cycle number
767 * last_half_cycle - 1.
768 * If we find one, then we know that the log starts there, as
769 * we've found a hole that didn't get written in going around
770 * the end of the physical log. The simple case for this is
771 * x + 1 ... | x ... | x - 1 | x
772 * <---------> less than scan distance
773 * If all of the blocks at the end of the log have cycle number
774 * last_half_cycle, then we check the blocks at the start of
775 * the log looking for occurrences of last_half_cycle. If we
776 * find one, then our current estimate for the location of the
777 * first occurrence of last_half_cycle is wrong and we move
778 * back to the hole we've found. This case looks like
779 * x + 1 ... | x | x + 1 | x ...
780 * ^ binary search stopped here
781 * Another case we need to handle that only occurs in 256k
783 * x + 1 ... | x ... | x+1 | x ...
784 * ^ binary search stops here
785 * In a 256k log, the scan at the end of the log will see the
786 * x + 1 blocks. We need to skip past those since that is
787 * certainly not the head of the log. By searching for
788 * last_half_cycle-1 we accomplish that.
790 ASSERT(head_blk <= INT_MAX &&
791 (xfs_daddr_t) num_scan_bblks >= head_blk);
792 start_blk = log_bbnum - (num_scan_bblks - head_blk);
793 if ((error = xlog_find_verify_cycle(log, start_blk,
794 num_scan_bblks - (int)head_blk,
795 (stop_on_cycle - 1), &new_blk)))
803 * Scan beginning of log now. The last part of the physical
804 * log is good. This scan needs to verify that it doesn't find
805 * the last_half_cycle.
808 ASSERT(head_blk <= INT_MAX);
809 if ((error = xlog_find_verify_cycle(log,
810 start_blk, (int)head_blk,
811 stop_on_cycle, &new_blk)))
819 * Now we need to make sure head_blk is not pointing to a block in
820 * the middle of a log record.
822 num_scan_bblks = XLOG_REC_SHIFT(log);
823 if (head_blk >= num_scan_bblks) {
824 start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
826 /* start ptr at last block ptr before head_blk */
827 if ((error = xlog_find_verify_log_record(log, start_blk,
828 &head_blk, 0)) == -1) {
829 error = XFS_ERROR(EIO);
835 ASSERT(head_blk <= INT_MAX);
836 if ((error = xlog_find_verify_log_record(log, start_blk,
837 &head_blk, 0)) == -1) {
838 /* We hit the beginning of the log during our search */
839 start_blk = log_bbnum - (num_scan_bblks - head_blk);
841 ASSERT(start_blk <= INT_MAX &&
842 (xfs_daddr_t) log_bbnum-start_blk >= 0);
843 ASSERT(head_blk <= INT_MAX);
844 if ((error = xlog_find_verify_log_record(log,
846 (int)head_blk)) == -1) {
847 error = XFS_ERROR(EIO);
851 if (new_blk != log_bbnum)
858 if (head_blk == log_bbnum)
859 *return_head_blk = 0;
861 *return_head_blk = head_blk;
863 * When returning here, we have a good block number. Bad block
864 * means that during a previous crash, we didn't have a clean break
865 * from cycle number N to cycle number N-1. In this case, we need
866 * to find the first block with cycle number N-1.
874 xfs_warn(log->l_mp, "failed to find log head");
879 * Find the sync block number or the tail of the log.
881 * This will be the block number of the last record to have its
882 * associated buffers synced to disk. Every log record header has
883 * a sync lsn embedded in it. LSNs hold block numbers, so it is easy
884 * to get a sync block number. The only concern is to figure out which
885 * log record header to believe.
887 * The following algorithm uses the log record header with the largest
888 * lsn. The entire log record does not need to be valid. We only care
889 * that the header is valid.
891 * We could speed up search by using current head_blk buffer, but it is not
897 xfs_daddr_t *head_blk,
898 xfs_daddr_t *tail_blk)
900 xlog_rec_header_t *rhead;
901 xlog_op_header_t *op_head;
902 xfs_caddr_t offset = NULL;
905 xfs_daddr_t umount_data_blk;
906 xfs_daddr_t after_umount_blk;
913 * Find previous log record
915 if ((error = xlog_find_head(log, head_blk)))
918 bp = xlog_get_bp(log, 1);
921 if (*head_blk == 0) { /* special case */
922 error = xlog_bread(log, 0, 1, bp, &offset);
926 if (xlog_get_cycle(offset) == 0) {
928 /* leave all other log inited values alone */
934 * Search backwards looking for log record header block
936 ASSERT(*head_blk < INT_MAX);
937 for (i = (int)(*head_blk) - 1; i >= 0; i--) {
938 error = xlog_bread(log, i, 1, bp, &offset);
942 if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
948 * If we haven't found the log record header block, start looking
949 * again from the end of the physical log. XXXmiken: There should be
950 * a check here to make sure we didn't search more than N blocks in
954 for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
955 error = xlog_bread(log, i, 1, bp, &offset);
959 if (*(__be32 *)offset ==
960 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
967 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
970 return XFS_ERROR(EIO);
973 /* find blk_no of tail of log */
974 rhead = (xlog_rec_header_t *)offset;
975 *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
978 * Reset log values according to the state of the log when we
979 * crashed. In the case where head_blk == 0, we bump curr_cycle
980 * one because the next write starts a new cycle rather than
981 * continuing the cycle of the last good log record. At this
982 * point we have guaranteed that all partial log records have been
983 * accounted for. Therefore, we know that the last good log record
984 * written was complete and ended exactly on the end boundary
985 * of the physical log.
987 log->l_prev_block = i;
988 log->l_curr_block = (int)*head_blk;
989 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
992 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
993 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
994 xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
995 BBTOB(log->l_curr_block));
996 xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
997 BBTOB(log->l_curr_block));
1000 * Look for unmount record. If we find it, then we know there
1001 * was a clean unmount. Since 'i' could be the last block in
1002 * the physical log, we convert to a log block before comparing
1005 * Save the current tail lsn to use to pass to
1006 * xlog_clear_stale_blocks() below. We won't want to clear the
1007 * unmount record if there is one, so we pass the lsn of the
1008 * unmount record rather than the block after it.
1010 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1011 int h_size = be32_to_cpu(rhead->h_size);
1012 int h_version = be32_to_cpu(rhead->h_version);
1014 if ((h_version & XLOG_VERSION_2) &&
1015 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
1016 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
1017 if (h_size % XLOG_HEADER_CYCLE_SIZE)
1025 after_umount_blk = (i + hblks + (int)
1026 BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
1027 tail_lsn = atomic64_read(&log->l_tail_lsn);
1028 if (*head_blk == after_umount_blk &&
1029 be32_to_cpu(rhead->h_num_logops) == 1) {
1030 umount_data_blk = (i + hblks) % log->l_logBBsize;
1031 error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
1035 op_head = (xlog_op_header_t *)offset;
1036 if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1038 * Set tail and last sync so that newly written
1039 * log records will point recovery to after the
1040 * current unmount record.
1042 xlog_assign_atomic_lsn(&log->l_tail_lsn,
1043 log->l_curr_cycle, after_umount_blk);
1044 xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1045 log->l_curr_cycle, after_umount_blk);
1046 *tail_blk = after_umount_blk;
1049 * Note that the unmount was clean. If the unmount
1050 * was not clean, we need to know this to rebuild the
1051 * superblock counters from the perag headers if we
1052 * have a filesystem using non-persistent counters.
1054 log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1059 * Make sure that there are no blocks in front of the head
1060 * with the same cycle number as the head. This can happen
1061 * because we allow multiple outstanding log writes concurrently,
1062 * and the later writes might make it out before earlier ones.
1064 * We use the lsn from before modifying it so that we'll never
1065 * overwrite the unmount record after a clean unmount.
1067 * Do this only if we are going to recover the filesystem
1069 * NOTE: This used to say "if (!readonly)"
1070 * However on Linux, we can & do recover a read-only filesystem.
1071 * We only skip recovery if NORECOVERY is specified on mount,
1072 * in which case we would not be here.
1074 * But... if the -device- itself is readonly, just skip this.
1075 * We can't recover this device anyway, so it won't matter.
1077 if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
1078 error = xlog_clear_stale_blocks(log, tail_lsn);
1084 xfs_warn(log->l_mp, "failed to locate log tail");
1089 * Is the log zeroed at all?
1091 * The last binary search should be changed to perform an X block read
1092 * once X becomes small enough. You can then search linearly through
1093 * the X blocks. This will cut down on the number of reads we need to do.
1095 * If the log is partially zeroed, this routine will pass back the blkno
1096 * of the first block with cycle number 0. It won't have a complete LR
1100 * 0 => the log is completely written to
1101 * -1 => use *blk_no as the first block of the log
1102 * >0 => error has occurred
1107 xfs_daddr_t *blk_no)
1111 uint first_cycle, last_cycle;
1112 xfs_daddr_t new_blk, last_blk, start_blk;
1113 xfs_daddr_t num_scan_bblks;
1114 int error, log_bbnum = log->l_logBBsize;
1118 /* check totally zeroed log */
1119 bp = xlog_get_bp(log, 1);
1122 error = xlog_bread(log, 0, 1, bp, &offset);
1126 first_cycle = xlog_get_cycle(offset);
1127 if (first_cycle == 0) { /* completely zeroed log */
1133 /* check partially zeroed log */
1134 error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
1138 last_cycle = xlog_get_cycle(offset);
1139 if (last_cycle != 0) { /* log completely written to */
1142 } else if (first_cycle != 1) {
1144 * If the cycle of the last block is zero, the cycle of
1145 * the first block must be 1. If it's not, maybe we're
1146 * not looking at a log... Bail out.
1149 "Log inconsistent or not a log (last==0, first!=1)");
1150 error = XFS_ERROR(EINVAL);
1154 /* we have a partially zeroed log */
1155 last_blk = log_bbnum-1;
1156 if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
1160 * Validate the answer. Because there is no way to guarantee that
1161 * the entire log is made up of log records which are the same size,
1162 * we scan over the defined maximum blocks. At this point, the maximum
1163 * is not chosen to mean anything special. XXXmiken
1165 num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1166 ASSERT(num_scan_bblks <= INT_MAX);
1168 if (last_blk < num_scan_bblks)
1169 num_scan_bblks = last_blk;
1170 start_blk = last_blk - num_scan_bblks;
1173 * We search for any instances of cycle number 0 that occur before
1174 * our current estimate of the head. What we're trying to detect is
1175 * 1 ... | 0 | 1 | 0...
1176 * ^ binary search ends here
1178 if ((error = xlog_find_verify_cycle(log, start_blk,
1179 (int)num_scan_bblks, 0, &new_blk)))
1185 * Potentially backup over partial log record write. We don't need
1186 * to search the end of the log because we know it is zero.
1188 if ((error = xlog_find_verify_log_record(log, start_blk,
1189 &last_blk, 0)) == -1) {
1190 error = XFS_ERROR(EIO);
1204 * These are simple subroutines used by xlog_clear_stale_blocks() below
1205 * to initialize a buffer full of empty log record headers and write
1206 * them into the log.
1217 xlog_rec_header_t *recp = (xlog_rec_header_t *)buf;
1219 memset(buf, 0, BBSIZE);
1220 recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1221 recp->h_cycle = cpu_to_be32(cycle);
1222 recp->h_version = cpu_to_be32(
1223 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1224 recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1225 recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1226 recp->h_fmt = cpu_to_be32(XLOG_FMT);
1227 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1231 xlog_write_log_records(
1242 int sectbb = log->l_sectBBsize;
1243 int end_block = start_block + blocks;
1249 * Greedily allocate a buffer big enough to handle the full
1250 * range of basic blocks to be written. If that fails, try
1251 * a smaller size. We need to be able to write at least a
1252 * log sector, or we're out of luck.
1254 bufblks = 1 << ffs(blocks);
1255 while (bufblks > log->l_logBBsize)
1257 while (!(bp = xlog_get_bp(log, bufblks))) {
1259 if (bufblks < sectbb)
1263 /* We may need to do a read at the start to fill in part of
1264 * the buffer in the starting sector not covered by the first
1267 balign = round_down(start_block, sectbb);
1268 if (balign != start_block) {
1269 error = xlog_bread_noalign(log, start_block, 1, bp);
1273 j = start_block - balign;
1276 for (i = start_block; i < end_block; i += bufblks) {
1277 int bcount, endcount;
1279 bcount = min(bufblks, end_block - start_block);
1280 endcount = bcount - j;
1282 /* We may need to do a read at the end to fill in part of
1283 * the buffer in the final sector not covered by the write.
1284 * If this is the same sector as the above read, skip it.
1286 ealign = round_down(end_block, sectbb);
1287 if (j == 0 && (start_block + endcount > ealign)) {
1288 offset = bp->b_addr + BBTOB(ealign - start_block);
1289 error = xlog_bread_offset(log, ealign, sectbb,
1296 offset = xlog_align(log, start_block, endcount, bp);
1297 for (; j < endcount; j++) {
1298 xlog_add_record(log, offset, cycle, i+j,
1299 tail_cycle, tail_block);
1302 error = xlog_bwrite(log, start_block, endcount, bp);
1305 start_block += endcount;
1315 * This routine is called to blow away any incomplete log writes out
1316 * in front of the log head. We do this so that we won't become confused
1317 * if we come up, write only a little bit more, and then crash again.
1318 * If we leave the partial log records out there, this situation could
1319 * cause us to think those partial writes are valid blocks since they
1320 * have the current cycle number. We get rid of them by overwriting them
1321 * with empty log records with the old cycle number rather than the
1324 * The tail lsn is passed in rather than taken from
1325 * the log so that we will not write over the unmount record after a
1326 * clean unmount in a 512 block log. Doing so would leave the log without
1327 * any valid log records in it until a new one was written. If we crashed
1328 * during that time we would not be able to recover.
1331 xlog_clear_stale_blocks(
1335 int tail_cycle, head_cycle;
1336 int tail_block, head_block;
1337 int tail_distance, max_distance;
1341 tail_cycle = CYCLE_LSN(tail_lsn);
1342 tail_block = BLOCK_LSN(tail_lsn);
1343 head_cycle = log->l_curr_cycle;
1344 head_block = log->l_curr_block;
1347 * Figure out the distance between the new head of the log
1348 * and the tail. We want to write over any blocks beyond the
1349 * head that we may have written just before the crash, but
1350 * we don't want to overwrite the tail of the log.
1352 if (head_cycle == tail_cycle) {
1354 * The tail is behind the head in the physical log,
1355 * so the distance from the head to the tail is the
1356 * distance from the head to the end of the log plus
1357 * the distance from the beginning of the log to the
1360 if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1361 XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
1362 XFS_ERRLEVEL_LOW, log->l_mp);
1363 return XFS_ERROR(EFSCORRUPTED);
1365 tail_distance = tail_block + (log->l_logBBsize - head_block);
1368 * The head is behind the tail in the physical log,
1369 * so the distance from the head to the tail is just
1370 * the tail block minus the head block.
1372 if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
1373 XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
1374 XFS_ERRLEVEL_LOW, log->l_mp);
1375 return XFS_ERROR(EFSCORRUPTED);
1377 tail_distance = tail_block - head_block;
1381 * If the head is right up against the tail, we can't clear
1384 if (tail_distance <= 0) {
1385 ASSERT(tail_distance == 0);
1389 max_distance = XLOG_TOTAL_REC_SHIFT(log);
1391 * Take the smaller of the maximum amount of outstanding I/O
1392 * we could have and the distance to the tail to clear out.
1393 * We take the smaller so that we don't overwrite the tail and
1394 * we don't waste all day writing from the head to the tail
1397 max_distance = MIN(max_distance, tail_distance);
1399 if ((head_block + max_distance) <= log->l_logBBsize) {
1401 * We can stomp all the blocks we need to without
1402 * wrapping around the end of the log. Just do it
1403 * in a single write. Use the cycle number of the
1404 * current cycle minus one so that the log will look like:
1407 error = xlog_write_log_records(log, (head_cycle - 1),
1408 head_block, max_distance, tail_cycle,
1414 * We need to wrap around the end of the physical log in
1415 * order to clear all the blocks. Do it in two separate
1416 * I/Os. The first write should be from the head to the
1417 * end of the physical log, and it should use the current
1418 * cycle number minus one just like above.
1420 distance = log->l_logBBsize - head_block;
1421 error = xlog_write_log_records(log, (head_cycle - 1),
1422 head_block, distance, tail_cycle,
1429 * Now write the blocks at the start of the physical log.
1430 * This writes the remainder of the blocks we want to clear.
1431 * It uses the current cycle number since we're now on the
1432 * same cycle as the head so that we get:
1433 * n ... n ... | n - 1 ...
1434 * ^^^^^ blocks we're writing
1436 distance = max_distance - (log->l_logBBsize - head_block);
1437 error = xlog_write_log_records(log, head_cycle, 0, distance,
1438 tail_cycle, tail_block);
1446 /******************************************************************************
1448 * Log recover routines
1450 ******************************************************************************
1453 STATIC xlog_recover_t *
1454 xlog_recover_find_tid(
1455 struct hlist_head *head,
1458 xlog_recover_t *trans;
1460 hlist_for_each_entry(trans, head, r_list) {
1461 if (trans->r_log_tid == tid)
1468 xlog_recover_new_tid(
1469 struct hlist_head *head,
1473 xlog_recover_t *trans;
1475 trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
1476 trans->r_log_tid = tid;
1478 INIT_LIST_HEAD(&trans->r_itemq);
1480 INIT_HLIST_NODE(&trans->r_list);
1481 hlist_add_head(&trans->r_list, head);
1485 xlog_recover_add_item(
1486 struct list_head *head)
1488 xlog_recover_item_t *item;
1490 item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
1491 INIT_LIST_HEAD(&item->ri_list);
1492 list_add_tail(&item->ri_list, head);
1496 xlog_recover_add_to_cont_trans(
1498 struct xlog_recover *trans,
1502 xlog_recover_item_t *item;
1503 xfs_caddr_t ptr, old_ptr;
1506 if (list_empty(&trans->r_itemq)) {
1507 /* finish copying rest of trans header */
1508 xlog_recover_add_item(&trans->r_itemq);
1509 ptr = (xfs_caddr_t) &trans->r_theader +
1510 sizeof(xfs_trans_header_t) - len;
1511 memcpy(ptr, dp, len); /* d, s, l */
1514 /* take the tail entry */
1515 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1517 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1518 old_len = item->ri_buf[item->ri_cnt-1].i_len;
1520 ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
1521 memcpy(&ptr[old_len], dp, len); /* d, s, l */
1522 item->ri_buf[item->ri_cnt-1].i_len += len;
1523 item->ri_buf[item->ri_cnt-1].i_addr = ptr;
1524 trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
1529 * The next region to add is the start of a new region. It could be
1530 * a whole region or it could be the first part of a new region. Because
1531 * of this, the assumption here is that the type and size fields of all
1532 * format structures fit into the first 32 bits of the structure.
1534 * This works because all regions must be 32 bit aligned. Therefore, we
1535 * either have both fields or we have neither field. In the case we have
1536 * neither field, the data part of the region is zero length. We only have
1537 * a log_op_header and can throw away the header since a new one will appear
1538 * later. If we have at least 4 bytes, then we can determine how many regions
1539 * will appear in the current log item.
1542 xlog_recover_add_to_trans(
1544 struct xlog_recover *trans,
1548 xfs_inode_log_format_t *in_f; /* any will do */
1549 xlog_recover_item_t *item;
1554 if (list_empty(&trans->r_itemq)) {
1555 /* we need to catch log corruptions here */
1556 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
1557 xfs_warn(log->l_mp, "%s: bad header magic number",
1560 return XFS_ERROR(EIO);
1562 if (len == sizeof(xfs_trans_header_t))
1563 xlog_recover_add_item(&trans->r_itemq);
1564 memcpy(&trans->r_theader, dp, len); /* d, s, l */
1568 ptr = kmem_alloc(len, KM_SLEEP);
1569 memcpy(ptr, dp, len);
1570 in_f = (xfs_inode_log_format_t *)ptr;
1572 /* take the tail entry */
1573 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1574 if (item->ri_total != 0 &&
1575 item->ri_total == item->ri_cnt) {
1576 /* tail item is in use, get a new one */
1577 xlog_recover_add_item(&trans->r_itemq);
1578 item = list_entry(trans->r_itemq.prev,
1579 xlog_recover_item_t, ri_list);
1582 if (item->ri_total == 0) { /* first region to be added */
1583 if (in_f->ilf_size == 0 ||
1584 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
1586 "bad number of regions (%d) in inode log format",
1590 return XFS_ERROR(EIO);
1593 item->ri_total = in_f->ilf_size;
1595 kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
1598 ASSERT(item->ri_total > item->ri_cnt);
1599 /* Description region is ri_buf[0] */
1600 item->ri_buf[item->ri_cnt].i_addr = ptr;
1601 item->ri_buf[item->ri_cnt].i_len = len;
1603 trace_xfs_log_recover_item_add(log, trans, item, 0);
1608 * Sort the log items in the transaction.
1610 * The ordering constraints are defined by the inode allocation and unlink
1611 * behaviour. The rules are:
1613 * 1. Every item is only logged once in a given transaction. Hence it
1614 * represents the last logged state of the item. Hence ordering is
1615 * dependent on the order in which operations need to be performed so
1616 * required initial conditions are always met.
1618 * 2. Cancelled buffers are recorded in pass 1 in a separate table and
1619 * there's nothing to replay from them so we can simply cull them
1620 * from the transaction. However, we can't do that until after we've
1621 * replayed all the other items because they may be dependent on the
1622 * cancelled buffer and replaying the cancelled buffer can remove it
1623 * form the cancelled buffer table. Hence they have tobe done last.
1625 * 3. Inode allocation buffers must be replayed before inode items that
1626 * read the buffer and replay changes into it. For filesystems using the
1627 * ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1628 * treated the same as inode allocation buffers as they create and
1629 * initialise the buffers directly.
1631 * 4. Inode unlink buffers must be replayed after inode items are replayed.
1632 * This ensures that inodes are completely flushed to the inode buffer
1633 * in a "free" state before we remove the unlinked inode list pointer.
1635 * Hence the ordering needs to be inode allocation buffers first, inode items
1636 * second, inode unlink buffers third and cancelled buffers last.
1638 * But there's a problem with that - we can't tell an inode allocation buffer
1639 * apart from a regular buffer, so we can't separate them. We can, however,
1640 * tell an inode unlink buffer from the others, and so we can separate them out
1641 * from all the other buffers and move them to last.
1643 * Hence, 4 lists, in order from head to tail:
1644 * - buffer_list for all buffers except cancelled/inode unlink buffers
1645 * - item_list for all non-buffer items
1646 * - inode_buffer_list for inode unlink buffers
1647 * - cancel_list for the cancelled buffers
1649 * Note that we add objects to the tail of the lists so that first-to-last
1650 * ordering is preserved within the lists. Adding objects to the head of the
1651 * list means when we traverse from the head we walk them in last-to-first
1652 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1653 * but for all other items there may be specific ordering that we need to
1657 xlog_recover_reorder_trans(
1659 struct xlog_recover *trans,
1662 xlog_recover_item_t *item, *n;
1663 LIST_HEAD(sort_list);
1664 LIST_HEAD(cancel_list);
1665 LIST_HEAD(buffer_list);
1666 LIST_HEAD(inode_buffer_list);
1667 LIST_HEAD(inode_list);
1669 list_splice_init(&trans->r_itemq, &sort_list);
1670 list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1671 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1673 switch (ITEM_TYPE(item)) {
1674 case XFS_LI_ICREATE:
1675 list_move_tail(&item->ri_list, &buffer_list);
1678 if (buf_f->blf_flags & XFS_BLF_CANCEL) {
1679 trace_xfs_log_recover_item_reorder_head(log,
1681 list_move(&item->ri_list, &cancel_list);
1684 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
1685 list_move(&item->ri_list, &inode_buffer_list);
1688 list_move_tail(&item->ri_list, &buffer_list);
1692 case XFS_LI_QUOTAOFF:
1695 trace_xfs_log_recover_item_reorder_tail(log,
1697 list_move_tail(&item->ri_list, &inode_list);
1701 "%s: unrecognized type of log operation",
1704 return XFS_ERROR(EIO);
1707 ASSERT(list_empty(&sort_list));
1708 if (!list_empty(&buffer_list))
1709 list_splice(&buffer_list, &trans->r_itemq);
1710 if (!list_empty(&inode_list))
1711 list_splice_tail(&inode_list, &trans->r_itemq);
1712 if (!list_empty(&inode_buffer_list))
1713 list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1714 if (!list_empty(&cancel_list))
1715 list_splice_tail(&cancel_list, &trans->r_itemq);
1720 * Build up the table of buf cancel records so that we don't replay
1721 * cancelled data in the second pass. For buffer records that are
1722 * not cancel records, there is nothing to do here so we just return.
1724 * If we get a cancel record which is already in the table, this indicates
1725 * that the buffer was cancelled multiple times. In order to ensure
1726 * that during pass 2 we keep the record in the table until we reach its
1727 * last occurrence in the log, we keep a reference count in the cancel
1728 * record in the table to tell us how many times we expect to see this
1729 * record during the second pass.
1732 xlog_recover_buffer_pass1(
1734 struct xlog_recover_item *item)
1736 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1737 struct list_head *bucket;
1738 struct xfs_buf_cancel *bcp;
1741 * If this isn't a cancel buffer item, then just return.
1743 if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1744 trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1749 * Insert an xfs_buf_cancel record into the hash table of them.
1750 * If there is already an identical record, bump its reference count.
1752 bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1753 list_for_each_entry(bcp, bucket, bc_list) {
1754 if (bcp->bc_blkno == buf_f->blf_blkno &&
1755 bcp->bc_len == buf_f->blf_len) {
1757 trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1762 bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
1763 bcp->bc_blkno = buf_f->blf_blkno;
1764 bcp->bc_len = buf_f->blf_len;
1765 bcp->bc_refcount = 1;
1766 list_add_tail(&bcp->bc_list, bucket);
1768 trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1773 * Check to see whether the buffer being recovered has a corresponding
1774 * entry in the buffer cancel record table. If it is, return the cancel
1775 * buffer structure to the caller.
1777 STATIC struct xfs_buf_cancel *
1778 xlog_peek_buffer_cancelled(
1784 struct list_head *bucket;
1785 struct xfs_buf_cancel *bcp;
1787 if (!log->l_buf_cancel_table) {
1788 /* empty table means no cancelled buffers in the log */
1789 ASSERT(!(flags & XFS_BLF_CANCEL));
1793 bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
1794 list_for_each_entry(bcp, bucket, bc_list) {
1795 if (bcp->bc_blkno == blkno && bcp->bc_len == len)
1800 * We didn't find a corresponding entry in the table, so return 0 so
1801 * that the buffer is NOT cancelled.
1803 ASSERT(!(flags & XFS_BLF_CANCEL));
1808 * If the buffer is being cancelled then return 1 so that it will be cancelled,
1809 * otherwise return 0. If the buffer is actually a buffer cancel item
1810 * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
1811 * table and remove it from the table if this is the last reference.
1813 * We remove the cancel record from the table when we encounter its last
1814 * occurrence in the log so that if the same buffer is re-used again after its
1815 * last cancellation we actually replay the changes made at that point.
1818 xlog_check_buffer_cancelled(
1824 struct xfs_buf_cancel *bcp;
1826 bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
1831 * We've go a match, so return 1 so that the recovery of this buffer
1832 * is cancelled. If this buffer is actually a buffer cancel log
1833 * item, then decrement the refcount on the one in the table and
1834 * remove it if this is the last reference.
1836 if (flags & XFS_BLF_CANCEL) {
1837 if (--bcp->bc_refcount == 0) {
1838 list_del(&bcp->bc_list);
1846 * Perform recovery for a buffer full of inodes. In these buffers, the only
1847 * data which should be recovered is that which corresponds to the
1848 * di_next_unlinked pointers in the on disk inode structures. The rest of the
1849 * data for the inodes is always logged through the inodes themselves rather
1850 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
1852 * The only time when buffers full of inodes are fully recovered is when the
1853 * buffer is full of newly allocated inodes. In this case the buffer will
1854 * not be marked as an inode buffer and so will be sent to
1855 * xlog_recover_do_reg_buffer() below during recovery.
1858 xlog_recover_do_inode_buffer(
1859 struct xfs_mount *mp,
1860 xlog_recover_item_t *item,
1862 xfs_buf_log_format_t *buf_f)
1868 int reg_buf_offset = 0;
1869 int reg_buf_bytes = 0;
1870 int next_unlinked_offset;
1872 xfs_agino_t *logged_nextp;
1873 xfs_agino_t *buffer_nextp;
1875 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1878 * Post recovery validation only works properly on CRC enabled
1881 if (xfs_sb_version_hascrc(&mp->m_sb))
1882 bp->b_ops = &xfs_inode_buf_ops;
1884 inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
1885 for (i = 0; i < inodes_per_buf; i++) {
1886 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1887 offsetof(xfs_dinode_t, di_next_unlinked);
1889 while (next_unlinked_offset >=
1890 (reg_buf_offset + reg_buf_bytes)) {
1892 * The next di_next_unlinked field is beyond
1893 * the current logged region. Find the next
1894 * logged region that contains or is beyond
1895 * the current di_next_unlinked field.
1898 bit = xfs_next_bit(buf_f->blf_data_map,
1899 buf_f->blf_map_size, bit);
1902 * If there are no more logged regions in the
1903 * buffer, then we're done.
1908 nbits = xfs_contig_bits(buf_f->blf_data_map,
1909 buf_f->blf_map_size, bit);
1911 reg_buf_offset = bit << XFS_BLF_SHIFT;
1912 reg_buf_bytes = nbits << XFS_BLF_SHIFT;
1917 * If the current logged region starts after the current
1918 * di_next_unlinked field, then move on to the next
1919 * di_next_unlinked field.
1921 if (next_unlinked_offset < reg_buf_offset)
1924 ASSERT(item->ri_buf[item_index].i_addr != NULL);
1925 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
1926 ASSERT((reg_buf_offset + reg_buf_bytes) <=
1927 BBTOB(bp->b_io_length));
1930 * The current logged region contains a copy of the
1931 * current di_next_unlinked field. Extract its value
1932 * and copy it to the buffer copy.
1934 logged_nextp = item->ri_buf[item_index].i_addr +
1935 next_unlinked_offset - reg_buf_offset;
1936 if (unlikely(*logged_nextp == 0)) {
1938 "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
1939 "Trying to replay bad (0) inode di_next_unlinked field.",
1941 XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1942 XFS_ERRLEVEL_LOW, mp);
1943 return XFS_ERROR(EFSCORRUPTED);
1946 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1947 next_unlinked_offset);
1948 *buffer_nextp = *logged_nextp;
1951 * If necessary, recalculate the CRC in the on-disk inode. We
1952 * have to leave the inode in a consistent state for whoever
1955 xfs_dinode_calc_crc(mp, (struct xfs_dinode *)
1956 xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
1964 * V5 filesystems know the age of the buffer on disk being recovered. We can
1965 * have newer objects on disk than we are replaying, and so for these cases we
1966 * don't want to replay the current change as that will make the buffer contents
1967 * temporarily invalid on disk.
1969 * The magic number might not match the buffer type we are going to recover
1970 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence
1971 * extract the LSN of the existing object in the buffer based on it's current
1972 * magic number. If we don't recognise the magic number in the buffer, then
1973 * return a LSN of -1 so that the caller knows it was an unrecognised block and
1974 * so can recover the buffer.
1976 * Note: we cannot rely solely on magic number matches to determine that the
1977 * buffer has a valid LSN - we also need to verify that it belongs to this
1978 * filesystem, so we need to extract the object's LSN and compare it to that
1979 * which we read from the superblock. If the UUIDs don't match, then we've got a
1980 * stale metadata block from an old filesystem instance that we need to recover
1984 xlog_recover_get_buf_lsn(
1985 struct xfs_mount *mp,
1991 void *blk = bp->b_addr;
1995 /* v4 filesystems always recover immediately */
1996 if (!xfs_sb_version_hascrc(&mp->m_sb))
1997 goto recover_immediately;
1999 magic32 = be32_to_cpu(*(__be32 *)blk);
2001 case XFS_ABTB_CRC_MAGIC:
2002 case XFS_ABTC_CRC_MAGIC:
2003 case XFS_ABTB_MAGIC:
2004 case XFS_ABTC_MAGIC:
2005 case XFS_IBT_CRC_MAGIC:
2006 case XFS_IBT_MAGIC: {
2007 struct xfs_btree_block *btb = blk;
2009 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2010 uuid = &btb->bb_u.s.bb_uuid;
2013 case XFS_BMAP_CRC_MAGIC:
2014 case XFS_BMAP_MAGIC: {
2015 struct xfs_btree_block *btb = blk;
2017 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2018 uuid = &btb->bb_u.l.bb_uuid;
2022 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2023 uuid = &((struct xfs_agf *)blk)->agf_uuid;
2025 case XFS_AGFL_MAGIC:
2026 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2027 uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2030 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2031 uuid = &((struct xfs_agi *)blk)->agi_uuid;
2033 case XFS_SYMLINK_MAGIC:
2034 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2035 uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2037 case XFS_DIR3_BLOCK_MAGIC:
2038 case XFS_DIR3_DATA_MAGIC:
2039 case XFS_DIR3_FREE_MAGIC:
2040 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2041 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2043 case XFS_ATTR3_RMT_MAGIC:
2044 lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
2045 uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
2048 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2049 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2055 if (lsn != (xfs_lsn_t)-1) {
2056 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2057 goto recover_immediately;
2061 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2063 case XFS_DIR3_LEAF1_MAGIC:
2064 case XFS_DIR3_LEAFN_MAGIC:
2065 case XFS_DA3_NODE_MAGIC:
2066 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2067 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2073 if (lsn != (xfs_lsn_t)-1) {
2074 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2075 goto recover_immediately;
2080 * We do individual object checks on dquot and inode buffers as they
2081 * have their own individual LSN records. Also, we could have a stale
2082 * buffer here, so we have to at least recognise these buffer types.
2084 * A notd complexity here is inode unlinked list processing - it logs
2085 * the inode directly in the buffer, but we don't know which inodes have
2086 * been modified, and there is no global buffer LSN. Hence we need to
2087 * recover all inode buffer types immediately. This problem will be
2088 * fixed by logical logging of the unlinked list modifications.
2090 magic16 = be16_to_cpu(*(__be16 *)blk);
2092 case XFS_DQUOT_MAGIC:
2093 case XFS_DINODE_MAGIC:
2094 goto recover_immediately;
2099 /* unknown buffer contents, recover immediately */
2101 recover_immediately:
2102 return (xfs_lsn_t)-1;
2107 * Validate the recovered buffer is of the correct type and attach the
2108 * appropriate buffer operations to them for writeback. Magic numbers are in a
2110 * the first 16 bits of the buffer (inode buffer, dquot buffer),
2111 * the first 32 bits of the buffer (most blocks),
2112 * inside a struct xfs_da_blkinfo at the start of the buffer.
2115 xlog_recover_validate_buf_type(
2116 struct xfs_mount *mp,
2118 xfs_buf_log_format_t *buf_f)
2120 struct xfs_da_blkinfo *info = bp->b_addr;
2125 magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
2126 magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
2127 magicda = be16_to_cpu(info->magic);
2128 switch (xfs_blft_from_flags(buf_f)) {
2129 case XFS_BLFT_BTREE_BUF:
2131 case XFS_ABTB_CRC_MAGIC:
2132 case XFS_ABTC_CRC_MAGIC:
2133 case XFS_ABTB_MAGIC:
2134 case XFS_ABTC_MAGIC:
2135 bp->b_ops = &xfs_allocbt_buf_ops;
2137 case XFS_IBT_CRC_MAGIC:
2139 bp->b_ops = &xfs_inobt_buf_ops;
2141 case XFS_BMAP_CRC_MAGIC:
2142 case XFS_BMAP_MAGIC:
2143 bp->b_ops = &xfs_bmbt_buf_ops;
2146 xfs_warn(mp, "Bad btree block magic!");
2151 case XFS_BLFT_AGF_BUF:
2152 if (magic32 != XFS_AGF_MAGIC) {
2153 xfs_warn(mp, "Bad AGF block magic!");
2157 bp->b_ops = &xfs_agf_buf_ops;
2159 case XFS_BLFT_AGFL_BUF:
2160 if (!xfs_sb_version_hascrc(&mp->m_sb))
2162 if (magic32 != XFS_AGFL_MAGIC) {
2163 xfs_warn(mp, "Bad AGFL block magic!");
2167 bp->b_ops = &xfs_agfl_buf_ops;
2169 case XFS_BLFT_AGI_BUF:
2170 if (magic32 != XFS_AGI_MAGIC) {
2171 xfs_warn(mp, "Bad AGI block magic!");
2175 bp->b_ops = &xfs_agi_buf_ops;
2177 case XFS_BLFT_UDQUOT_BUF:
2178 case XFS_BLFT_PDQUOT_BUF:
2179 case XFS_BLFT_GDQUOT_BUF:
2180 #ifdef CONFIG_XFS_QUOTA
2181 if (magic16 != XFS_DQUOT_MAGIC) {
2182 xfs_warn(mp, "Bad DQUOT block magic!");
2186 bp->b_ops = &xfs_dquot_buf_ops;
2189 "Trying to recover dquots without QUOTA support built in!");
2193 case XFS_BLFT_DINO_BUF:
2195 * we get here with inode allocation buffers, not buffers that
2196 * track unlinked list changes.
2198 if (magic16 != XFS_DINODE_MAGIC) {
2199 xfs_warn(mp, "Bad INODE block magic!");
2203 bp->b_ops = &xfs_inode_buf_ops;
2205 case XFS_BLFT_SYMLINK_BUF:
2206 if (magic32 != XFS_SYMLINK_MAGIC) {
2207 xfs_warn(mp, "Bad symlink block magic!");
2211 bp->b_ops = &xfs_symlink_buf_ops;
2213 case XFS_BLFT_DIR_BLOCK_BUF:
2214 if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2215 magic32 != XFS_DIR3_BLOCK_MAGIC) {
2216 xfs_warn(mp, "Bad dir block magic!");
2220 bp->b_ops = &xfs_dir3_block_buf_ops;
2222 case XFS_BLFT_DIR_DATA_BUF:
2223 if (magic32 != XFS_DIR2_DATA_MAGIC &&
2224 magic32 != XFS_DIR3_DATA_MAGIC) {
2225 xfs_warn(mp, "Bad dir data magic!");
2229 bp->b_ops = &xfs_dir3_data_buf_ops;
2231 case XFS_BLFT_DIR_FREE_BUF:
2232 if (magic32 != XFS_DIR2_FREE_MAGIC &&
2233 magic32 != XFS_DIR3_FREE_MAGIC) {
2234 xfs_warn(mp, "Bad dir3 free magic!");
2238 bp->b_ops = &xfs_dir3_free_buf_ops;
2240 case XFS_BLFT_DIR_LEAF1_BUF:
2241 if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2242 magicda != XFS_DIR3_LEAF1_MAGIC) {
2243 xfs_warn(mp, "Bad dir leaf1 magic!");
2247 bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2249 case XFS_BLFT_DIR_LEAFN_BUF:
2250 if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2251 magicda != XFS_DIR3_LEAFN_MAGIC) {
2252 xfs_warn(mp, "Bad dir leafn magic!");
2256 bp->b_ops = &xfs_dir3_leafn_buf_ops;
2258 case XFS_BLFT_DA_NODE_BUF:
2259 if (magicda != XFS_DA_NODE_MAGIC &&
2260 magicda != XFS_DA3_NODE_MAGIC) {
2261 xfs_warn(mp, "Bad da node magic!");
2265 bp->b_ops = &xfs_da3_node_buf_ops;
2267 case XFS_BLFT_ATTR_LEAF_BUF:
2268 if (magicda != XFS_ATTR_LEAF_MAGIC &&
2269 magicda != XFS_ATTR3_LEAF_MAGIC) {
2270 xfs_warn(mp, "Bad attr leaf magic!");
2274 bp->b_ops = &xfs_attr3_leaf_buf_ops;
2276 case XFS_BLFT_ATTR_RMT_BUF:
2277 if (!xfs_sb_version_hascrc(&mp->m_sb))
2279 if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2280 xfs_warn(mp, "Bad attr remote magic!");
2284 bp->b_ops = &xfs_attr3_rmt_buf_ops;
2286 case XFS_BLFT_SB_BUF:
2287 if (magic32 != XFS_SB_MAGIC) {
2288 xfs_warn(mp, "Bad SB block magic!");
2292 bp->b_ops = &xfs_sb_buf_ops;
2295 xfs_warn(mp, "Unknown buffer type %d!",
2296 xfs_blft_from_flags(buf_f));
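/*
 * Worked example (illustrative, not in the original): a recovered AGF
 * buffer starts with the bytes 'X','A','G','F', so
 * be32_to_cpu(*(__be32 *)bp->b_addr) yields 0x58414746 == XFS_AGF_MAGIC
 * and the XFS_BLFT_AGF_BUF case above attaches &xfs_agf_buf_ops.
 * magic16 and magicda are decoded the same way for the 16-bit and
 * struct xfs_da_blkinfo cases respectively.
 */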
2302 * Perform a 'normal' buffer recovery. Each logged region of the
2303 * buffer should be copied over the corresponding region in the
2304 * given buffer. The bitmap in the buf log format structure indicates
2305 * where to place the logged data.
2308 xlog_recover_do_reg_buffer(
2309 struct xfs_mount *mp,
2310 xlog_recover_item_t *item,
2312 xfs_buf_log_format_t *buf_f)
2319 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2322 i = 1; /* 0 is the buf format structure */
2324 bit = xfs_next_bit(buf_f->blf_data_map,
2325 buf_f->blf_map_size, bit);
2328 nbits = xfs_contig_bits(buf_f->blf_data_map,
2329 buf_f->blf_map_size, bit);
2331 ASSERT(item->ri_buf[i].i_addr != NULL);
2332 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2333 ASSERT(BBTOB(bp->b_io_length) >=
2334 ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2337 * The dirty regions logged in the buffer, even though
2338 * contiguous, may span multiple chunks. This is because the
2339 * dirty region may span a physical page boundary in a buffer
2340 * and hence be split into two separate vectors for writing into
2341 * the log. Hence we need to trim nbits back to the length of
2342 * the current region being copied out of the log.
2344 if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2345 nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2348 * Do a sanity check if this is a dquot buffer. Just checking
2349 * the first dquot in the buffer should do. XXX: This is
2350 * probably a good thing to do for other buf types also.
2353 if (buf_f->blf_flags &
2354 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2355 if (item->ri_buf[i].i_addr == NULL) {
2357 "XFS: NULL dquot in %s.", __func__);
2360 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2362 "XFS: dquot too small (%d) in %s.",
2363 item->ri_buf[i].i_len, __func__);
2366 error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
2367 -1, 0, XFS_QMOPT_DOWARN,
2368 "dquot_buf_recover");
2373 memcpy(xfs_buf_offset(bp,
2374 (uint)bit << XFS_BLF_SHIFT), /* dest */
2375 item->ri_buf[i].i_addr, /* source */
2376 nbits<<XFS_BLF_SHIFT); /* length */
2382 /* Shouldn't be any more regions */
2383 ASSERT(i == item->ri_total);
2386 * We can only do post recovery validation on items on CRC enabled
2387 * filesystems as we need to know when the buffer was written to be able
2388 * to determine if we should have replayed the item. If we replay old
2389 * metadata over a newer buffer, then it will enter a temporarily
2390 * inconsistent state resulting in verification failures. Hence for now
2391 * just avoid the verification stage for non-crc filesystems.
2393 if (xfs_sb_version_hascrc(&mp->m_sb))
2394 xlog_recover_validate_buf_type(mp, bp, buf_f);
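/*
 * Worked example (not from the original source): the dirty bitmap is
 * in units of XFS_BLF_CHUNK (128 byte) chunks, i.e. XFS_BLF_SHIFT == 7.
 * A run starting at bit 2 with nbits == 3 therefore copies
 * 3 << 7 == 384 bytes of logged data to byte offset 2 << 7 == 256 of
 * the buffer.
 */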
2398 * Do some primitive error checking on on-disk dquot data structures.
2402 struct xfs_mount *mp,
2403 xfs_disk_dquot_t *ddq,
2405 uint type, /* used only when IO_dorepair is true */
2409 xfs_dqblk_t *d = (xfs_dqblk_t *)ddq;
2413 * We can encounter an uninitialized dquot buffer for 2 reasons:
2414 * 1. If we crash while deleting the quotainode(s), and those blks got
2415 * used for user data. This is because we take the path of regular
2416 * file deletion; however, the size field of quotainodes is never
2417 * updated, so all the tricks that we play in itruncate_finish
2418 * don't quite matter.
2420 * 2. We don't play the quota buffers when there's a quotaoff logitem.
2421 * But the allocation will be replayed so we'll end up with an
2422 * uninitialized quota block.
2424 * This is all fine; things are still consistent, and we haven't lost
2425 * any quota information. Just don't complain about bad dquot blks.
2427 if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
2428 if (flags & XFS_QMOPT_DOWARN)
2430 "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
2431 str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
2434 if (ddq->d_version != XFS_DQUOT_VERSION) {
2435 if (flags & XFS_QMOPT_DOWARN)
2437 "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
2438 str, id, ddq->d_version, XFS_DQUOT_VERSION);
2442 if (ddq->d_flags != XFS_DQ_USER &&
2443 ddq->d_flags != XFS_DQ_PROJ &&
2444 ddq->d_flags != XFS_DQ_GROUP) {
2445 if (flags & XFS_QMOPT_DOWARN)
2447 "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
2448 str, id, ddq->d_flags);
2452 if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
2453 if (flags & XFS_QMOPT_DOWARN)
2455 "%s : ondisk-dquot 0x%p, ID mismatch: "
2456 "0x%x expected, found id 0x%x",
2457 str, ddq, id, be32_to_cpu(ddq->d_id));
2461 if (!errs && ddq->d_id) {
2462 if (ddq->d_blk_softlimit &&
2463 be64_to_cpu(ddq->d_bcount) >
2464 be64_to_cpu(ddq->d_blk_softlimit)) {
2465 if (!ddq->d_btimer) {
2466 if (flags & XFS_QMOPT_DOWARN)
2468 "%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
2469 str, (int)be32_to_cpu(ddq->d_id), ddq);
2473 if (ddq->d_ino_softlimit &&
2474 be64_to_cpu(ddq->d_icount) >
2475 be64_to_cpu(ddq->d_ino_softlimit)) {
2476 if (!ddq->d_itimer) {
2477 if (flags & XFS_QMOPT_DOWARN)
2479 "%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
2480 str, (int)be32_to_cpu(ddq->d_id), ddq);
2484 if (ddq->d_rtb_softlimit &&
2485 be64_to_cpu(ddq->d_rtbcount) >
2486 be64_to_cpu(ddq->d_rtb_softlimit)) {
2487 if (!ddq->d_rtbtimer) {
2488 if (flags & XFS_QMOPT_DOWARN)
2490 "%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
2491 str, (int)be32_to_cpu(ddq->d_id), ddq);
2497 if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2500 if (flags & XFS_QMOPT_DOWARN)
2501 xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
2504 * Typically, a repair is only requested by quotacheck.
2507 ASSERT(flags & XFS_QMOPT_DQREPAIR);
2508 memset(d, 0, sizeof(xfs_dqblk_t));
2510 d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
2511 d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
2512 d->dd_diskdq.d_flags = type;
2513 d->dd_diskdq.d_id = cpu_to_be32(id);
2515 if (xfs_sb_version_hascrc(&mp->m_sb)) {
2516 uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
2517 xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
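/*
 * Illustrative call (mirrors the buffer recovery usage earlier in this
 * file): an id of -1 means "don't verify the id", type is only used
 * when repairing, and XFS_QMOPT_DOWARN asks for the diagnostics above:
 *
 *	error = xfs_qm_dqcheck(mp, ddq, -1, 0, XFS_QMOPT_DOWARN,
 *			       "dquot_buf_recover");
 */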
2525 * Perform a dquot buffer recovery.
2526 * Simple algorithm: if we have found a QUOTAOFF log item of the same type
2527 * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2528 * Else, treat it as a regular buffer and do recovery.
2531 xlog_recover_do_dquot_buffer(
2532 struct xfs_mount *mp,
2534 struct xlog_recover_item *item,
2536 struct xfs_buf_log_format *buf_f)
2540 trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2543 * Filesystems are required to send in quota flags at mount time.
2545 if (mp->m_qflags == 0) {
2550 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2551 type |= XFS_DQ_USER;
2552 if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2553 type |= XFS_DQ_PROJ;
2554 if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2555 type |= XFS_DQ_GROUP;
2557 * This type of quotas was turned off, so ignore this buffer
2559 if (log->l_quotaoffs_flag & type)
2562 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
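/*
 * Example (illustrative): a buffer logged with both user and group
 * dquot flags accumulates type == (XFS_DQ_USER | XFS_DQ_GROUP) above,
 * so a quotaoff of either type recorded in l_quotaoffs_flag is enough
 * to suppress replay of the whole buffer.
 */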
2566 * This routine replays a modification made to a buffer at runtime.
2567 * There are actually two types of buffer, regular and inode, which
2568 * are handled differently. For inode buffers, we only recover a
2569 * specific set of data from them, namely
2570 * the inode di_next_unlinked fields. This is because all other inode
2571 * data is actually logged via inode records and any data we replay
2572 * here which overlaps that may be stale.
2574 * When meta-data buffers are freed at run time we log a buffer item
2575 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2576 * of the buffer in the log should not be replayed at recovery time.
2577 * This is so that if the blocks covered by the buffer are reused for
2578 * file data before we crash we don't end up replaying old, freed
2579 * meta-data into a user's file.
2581 * To handle the cancellation of buffer log items, we make two passes
2582 * over the log during recovery. During the first we build a table of
2583 * those buffers which have been cancelled, and during the second we
2584 * only replay those buffers which do not have corresponding cancel
2585 * records in the table. See xlog_recover_buffer_pass[1,2] above
2586 * for more details on the implementation of the table of cancel records.
2589 xlog_recover_buffer_pass2(
2591 struct list_head *buffer_list,
2592 struct xlog_recover_item *item,
2593 xfs_lsn_t current_lsn)
2595 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2596 xfs_mount_t *mp = log->l_mp;
2603 * In this pass we only want to recover all the buffers which have
2604 * not been cancelled and are not cancellation buffers themselves.
2606 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2607 buf_f->blf_len, buf_f->blf_flags)) {
2608 trace_xfs_log_recover_buf_cancel(log, buf_f);
2612 trace_xfs_log_recover_buf_recover(log, buf_f);
2615 if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2616 buf_flags |= XBF_UNMAPPED;
2618 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2621 return XFS_ERROR(ENOMEM);
2622 error = bp->b_error;
2624 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2629 * Recover the buffer only if we get an LSN from it and it's less than
2630 * the lsn of the transaction we are replaying.
2632 lsn = xlog_recover_get_buf_lsn(mp, bp);
2633 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0)
2636 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2637 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2638 } else if (buf_f->blf_flags &
2639 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2640 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2642 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2648 * Perform delayed write on the buffer. Asynchronous writes will be
2649 * slower when taking into account all the buffers to be flushed.
2651 * Also make sure that only inode buffers with good sizes stay in
2652 * the buffer cache. The kernel moves inodes in buffers of 1 block
2653 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger. The inode
2654 * buffers in the log can be a different size if the log was generated
2655 * by an older kernel using unclustered inode buffers or a newer kernel
2656 * running with a different inode cluster size. Regardless, if the
2657 * inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2658 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2659 * the buffer out of the buffer cache so that the buffer won't
2660 * overlap with future reads of those inodes.
2662 if (XFS_DINODE_MAGIC ==
2663 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2664 (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2665 (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2667 error = xfs_bwrite(bp);
2669 ASSERT(bp->b_target->bt_mount == mp);
2670 bp->b_iodone = xlog_recover_iodone;
2671 xfs_buf_delwri_queue(bp, buffer_list);
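/*
 * Example (illustrative values, not from the original): with 4k
 * filesystem blocks and an 8k inode cluster, MAX(blocksize,
 * XFS_INODE_CLUSTER_SIZE) is 8k, so a 4k inode buffer recovered from
 * an old unclustered log is written out directly above rather than
 * left in the cache where it could overlap later 8k inode reads.
 */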
2680 * Inode fork owner changes
2682 * If we have been told that we have to reparent the inode fork, it's because an
2683 * extent swap operation on a CRC enabled filesystem has been done and we are
2684 * replaying it. We need to walk the BMBT of the appropriate fork and change the owners of all the extents in that BMBT.
2687 * The complexity here is that we don't have an inode context to work with, so
2688 * after we've replayed the inode we need to instantiate one. This is where the fun begins.
2691 * We are in the middle of log recovery, so we can't run transactions. That
2692 * means we cannot use cache coherent inode instantiation via xfs_iget(), as
2693 * that will result in the corresponding iput() running the inode through
2694 * xfs_inactive(). If we've just replayed an inode core that changes the link
2695 * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
2696 * transactions (bad!).
2698 * So, to avoid this, we instantiate an inode directly from the inode core we've
2699 * just recovered. We have the buffer still locked, and all we really need to
2700 * instantiate is the inode core and the forks being modified. We can do this
2701 * manually, then run the inode btree owner change, and then tear down the
2702 * xfs_inode without having to run any transactions at all.
2704 * Also, because we don't have a transaction context available here but need
2705 * to gather all the buffers we modify for writeback, we pass the buffer_list
2706 * to the operation to use instead.
2710 xfs_recover_inode_owner_change(
2711 struct xfs_mount *mp,
2712 struct xfs_dinode *dip,
2713 struct xfs_inode_log_format *in_f,
2714 struct list_head *buffer_list)
2716 struct xfs_inode *ip;
2719 ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
2721 ip = xfs_inode_alloc(mp, in_f->ilf_ino);
2725 /* instantiate the inode */
2726 xfs_dinode_from_disk(&ip->i_d, dip);
2727 ASSERT(ip->i_d.di_version >= 3);
2729 error = xfs_iformat_fork(ip, dip);
2734 if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
2735 ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
2736 error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
2737 ip->i_ino, buffer_list);
2742 if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
2743 ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
2744 error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
2745 ip->i_ino, buffer_list);
2756 xlog_recover_inode_pass2(
2758 struct list_head *buffer_list,
2759 struct xlog_recover_item *item,
2760 xfs_lsn_t current_lsn)
2762 xfs_inode_log_format_t *in_f;
2763 xfs_mount_t *mp = log->l_mp;
2772 xfs_icdinode_t *dicp;
2776 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2777 in_f = item->ri_buf[0].i_addr;
2779 in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2781 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2787 * Inode buffers can be freed, look out for it,
2788 * and do not replay the inode.
2790 if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2791 in_f->ilf_len, 0)) {
2793 trace_xfs_log_recover_inode_cancel(log, in_f);
2796 trace_xfs_log_recover_inode_recover(log, in_f);
2798 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
2799 &xfs_inode_buf_ops);
2804 error = bp->b_error;
2806 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2809 ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2810 dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2813 * Make sure the place we're flushing out to really looks like an inode.
2816 if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
2818 "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
2819 __func__, dip, bp, in_f->ilf_ino);
2820 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2821 XFS_ERRLEVEL_LOW, mp);
2822 error = EFSCORRUPTED;
2825 dicp = item->ri_buf[1].i_addr;
2826 if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2828 "%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
2829 __func__, item, in_f->ilf_ino);
2830 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2831 XFS_ERRLEVEL_LOW, mp);
2832 error = EFSCORRUPTED;
2837 * If the inode has an LSN in it, recover the inode only if it's less
2838 * than the lsn of the transaction we are replaying. Note: we still
2839 * need to replay an owner change even though the inode is more recent
2840 * than the transaction as there is no guarantee that all the btree
2841 * blocks are more recent than this transaction, too.
2843 if (dip->di_version >= 3) {
2844 xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn);
2846 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2847 trace_xfs_log_recover_inode_skip(log, in_f);
2849 goto out_owner_change;
2854 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
2855 * are transactional and if ordering is necessary we can determine that
2856 * more accurately by the LSN field in the V3 inode core. Don't trust
2857 * the inode versions as we might be changing them here - use the
2858 * superblock flag to determine whether we need to look at di_flushiter
2859 * to skip replay when the on disk inode is newer than the log one.
2861 if (!xfs_sb_version_hascrc(&mp->m_sb) &&
2862 dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2864 * Deal with the wrap case: after di_flushiter wraps, the on-disk value
2865 * DI_MAX_FLUSH is logically older than the small numbers the log copy wraps to.
2867 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2868 dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2871 trace_xfs_log_recover_inode_skip(log, in_f);
2877 /* Take the opportunity to reset the flush iteration count */
2878 dicp->di_flushiter = 0;
2880 if (unlikely(S_ISREG(dicp->di_mode))) {
2881 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2882 (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2883 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
2884 XFS_ERRLEVEL_LOW, mp, dicp);
2886 "%s: Bad regular inode log record, rec ptr 0x%p, "
2887 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2888 __func__, item, dip, bp, in_f->ilf_ino);
2889 error = EFSCORRUPTED;
2892 } else if (unlikely(S_ISDIR(dicp->di_mode))) {
2893 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2894 (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2895 (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2896 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
2897 XFS_ERRLEVEL_LOW, mp, dicp);
2899 "%s: Bad dir inode log record, rec ptr 0x%p, "
2900 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2901 __func__, item, dip, bp, in_f->ilf_ino);
2902 error = EFSCORRUPTED;
2906 if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2907 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
2908 XFS_ERRLEVEL_LOW, mp, dicp);
2910 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2911 "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2912 __func__, item, dip, bp, in_f->ilf_ino,
2913 dicp->di_nextents + dicp->di_anextents,
2915 error = EFSCORRUPTED;
2918 if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2919 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
2920 XFS_ERRLEVEL_LOW, mp, dicp);
2922 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2923 "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
2924 item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
2925 error = EFSCORRUPTED;
2928 isize = xfs_icdinode_size(dicp->di_version);
2929 if (unlikely(item->ri_buf[1].i_len > isize)) {
2930 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
2931 XFS_ERRLEVEL_LOW, mp, dicp);
2933 "%s: Bad inode log record length %d, rec ptr 0x%p",
2934 __func__, item->ri_buf[1].i_len, item);
2935 error = EFSCORRUPTED;
2939 /* The core is in in-core format */
2940 xfs_dinode_to_disk(dip, dicp);
2942 /* the rest is in on-disk format */
2943 if (item->ri_buf[1].i_len > isize) {
2944 memcpy((char *)dip + isize,
2945 item->ri_buf[1].i_addr + isize,
2946 item->ri_buf[1].i_len - isize);
2949 fields = in_f->ilf_fields;
2950 switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2952 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2955 memcpy(XFS_DFORK_DPTR(dip),
2956 &in_f->ilf_u.ilfu_uuid,
2961 if (in_f->ilf_size == 2)
2962 goto out_owner_change;
2963 len = item->ri_buf[2].i_len;
2964 src = item->ri_buf[2].i_addr;
2965 ASSERT(in_f->ilf_size <= 4);
2966 ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2967 ASSERT(!(fields & XFS_ILOG_DFORK) ||
2968 (len == in_f->ilf_dsize));
2970 switch (fields & XFS_ILOG_DFORK) {
2971 case XFS_ILOG_DDATA:
2973 memcpy(XFS_DFORK_DPTR(dip), src, len);
2976 case XFS_ILOG_DBROOT:
2977 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2978 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2979 XFS_DFORK_DSIZE(dip, mp));
2984 * There are no data fork flags set.
2986 ASSERT((fields & XFS_ILOG_DFORK) == 0);
2991 * If we logged any attribute data, recover it. There may or
2992 * may not have been any other non-core data logged in this transaction.
2995 if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2996 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
3001 len = item->ri_buf[attr_index].i_len;
3002 src = item->ri_buf[attr_index].i_addr;
3003 ASSERT(len == in_f->ilf_asize);
3005 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
3006 case XFS_ILOG_ADATA:
3008 dest = XFS_DFORK_APTR(dip);
3009 ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
3010 memcpy(dest, src, len);
3013 case XFS_ILOG_ABROOT:
3014 dest = XFS_DFORK_APTR(dip);
3015 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
3016 len, (xfs_bmdr_block_t*)dest,
3017 XFS_DFORK_ASIZE(dip, mp));
3021 xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
3029 if (in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER))
3030 error = xfs_recover_inode_owner_change(mp, dip, in_f,
3032 /* re-generate the checksum. */
3033 xfs_dinode_calc_crc(log->l_mp, dip);
3035 ASSERT(bp->b_target->bt_mount == mp);
3036 bp->b_iodone = xlog_recover_iodone;
3037 xfs_buf_delwri_queue(bp, buffer_list);
3044 return XFS_ERROR(error);
3048 * Recover QUOTAOFF records. We simply make a note of it in the xlog
3049 * structure, so that we know not to do any dquot item or dquot buffer recovery, of that type.
3053 xlog_recover_quotaoff_pass1(
3055 struct xlog_recover_item *item)
3057 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
3061 * The logitem format's flag tells us if this was user quotaoff,
3062 * group/project quotaoff or both.
3064 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
3065 log->l_quotaoffs_flag |= XFS_DQ_USER;
3066 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
3067 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
3068 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
3069 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
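/*
 * Illustrative effect (not in the original source): pass 2 gates both
 * dquot items and dquot buffers on these bits, e.g.:
 *
 *	if (log->l_quotaoffs_flag & type)
 *		return 0;	(this quota type was turned off)
 */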
3075 * Recover a dquot record
3078 xlog_recover_dquot_pass2(
3080 struct list_head *buffer_list,
3081 struct xlog_recover_item *item,
3082 xfs_lsn_t current_lsn)
3084 xfs_mount_t *mp = log->l_mp;
3086 struct xfs_disk_dquot *ddq, *recddq;
3088 xfs_dq_logformat_t *dq_f;
3093 * Filesystems are required to send in quota flags at mount time.
3095 if (mp->m_qflags == 0)
3098 recddq = item->ri_buf[1].i_addr;
3099 if (recddq == NULL) {
3100 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
3101 return XFS_ERROR(EIO);
3103 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
3104 xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
3105 item->ri_buf[1].i_len, __func__);
3106 return XFS_ERROR(EIO);
3110 * This type of quotas was turned off, so ignore this record.
3112 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3114 if (log->l_quotaoffs_flag & type)
3118 * At this point we know that quota was _not_ turned off.
3119 * Since the mount flags are not indicating to us otherwise, this
3120 * must mean that quota is on, and the dquot needs to be replayed.
3121 * Remember that we may not have fully recovered the superblock yet,
3122 * so we can't do the usual trick of looking at the SB quota bits.
3124 * The other possibility, of course, is that the quota subsystem was
3125 * removed since the last mount - ENOSYS.
3127 dq_f = item->ri_buf[0].i_addr;
3129 error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
3130 "xlog_recover_dquot_pass2 (log copy)");
3132 return XFS_ERROR(EIO);
3133 ASSERT(dq_f->qlf_len == 1);
3135 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
3136 XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
3142 ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
3145 * At least the magic num portion should be on disk because this
3146 * was among a chunk of dquots created earlier, and we did some
3147 * minimal initialization then.
3149 error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
3150 "xlog_recover_dquot_pass2");
3153 return XFS_ERROR(EIO);
3157 * If the dquot has an LSN in it, recover the dquot only if it's less
3158 * than the lsn of the transaction we are replaying.
3160 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3161 struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
3162 xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn);
3164 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3169 memcpy(ddq, recddq, item->ri_buf[1].i_len);
3170 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3171 xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
3175 ASSERT(dq_f->qlf_size == 2);
3176 ASSERT(bp->b_target->bt_mount == mp);
3177 bp->b_iodone = xlog_recover_iodone;
3178 xfs_buf_delwri_queue(bp, buffer_list);
3186 * This routine is called to create an in-core extent free intent
3187 * item from the efi format structure which was logged on disk.
3188 * It allocates an in-core efi, copies the extents from the format
3189 * structure into it, and adds the efi to the AIL with the given
3193 xlog_recover_efi_pass2(
3195 struct xlog_recover_item *item,
3199 xfs_mount_t *mp = log->l_mp;
3200 xfs_efi_log_item_t *efip;
3201 xfs_efi_log_format_t *efi_formatp;
3203 efi_formatp = item->ri_buf[0].i_addr;
3205 efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
3206 if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
3207 &(efip->efi_format)))) {
3208 xfs_efi_item_free(efip);
3211 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
3213 spin_lock(&log->l_ailp->xa_lock);
3215 * xfs_trans_ail_update() drops the AIL lock.
3217 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
3223 * This routine is called when an efd format structure is found in
3224 * a committed transaction in the log. Its purpose is to cancel
3225 * the corresponding efi if it was still in the log. To do this
3226 * it searches the AIL for the efi with an id equal to that in the
3227 * efd format structure. If we find it, we remove the efi from the AIL and free it.
3231 xlog_recover_efd_pass2(
3233 struct xlog_recover_item *item)
3235 xfs_efd_log_format_t *efd_formatp;
3236 xfs_efi_log_item_t *efip = NULL;
3237 xfs_log_item_t *lip;
3239 struct xfs_ail_cursor cur;
3240 struct xfs_ail *ailp = log->l_ailp;
3242 efd_formatp = item->ri_buf[0].i_addr;
3243 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
3244 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
3245 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
3246 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
3247 efi_id = efd_formatp->efd_efi_id;
3250 * Search for the efi with the id in the efd format structure
3253 spin_lock(&ailp->xa_lock);
3254 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3255 while (lip != NULL) {
3256 if (lip->li_type == XFS_LI_EFI) {
3257 efip = (xfs_efi_log_item_t *)lip;
3258 if (efip->efi_format.efi_id == efi_id) {
3260 * xfs_trans_ail_delete() drops the
3263 xfs_trans_ail_delete(ailp, lip,
3264 SHUTDOWN_CORRUPT_INCORE);
3265 xfs_efi_item_free(efip);
3266 spin_lock(&ailp->xa_lock);
3270 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3272 xfs_trans_ail_cursor_done(ailp, &cur);
3273 spin_unlock(&ailp->xa_lock);
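/*
 * Illustrative pairing (not from the original source): an EFI logged
 * when an extent free starts stays in the AIL until a matching EFD
 * with the same efi_id is seen. If recovery finds the EFD here, the
 * EFI is cancelled above; otherwise xlog_recover_process_efis() later
 * frees the extents the orphaned EFI describes.
 */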
3279 * This routine is called when an inode create format structure is found in a
3280 * committed transaction in the log. Its purpose is to initialise the inodes
3281 * being allocated on disk. This requires us to get inode cluster buffers that
3282 * match the range to be initialised, stamped with inode templates and written
3283 * by delayed write so that subsequent modifications will hit the cached buffer
3284 * and only need writing out at the end of recovery.
3287 xlog_recover_do_icreate_pass2(
3289 struct list_head *buffer_list,
3290 xlog_recover_item_t *item)
3292 struct xfs_mount *mp = log->l_mp;
3293 struct xfs_icreate_log *icl;
3294 xfs_agnumber_t agno;
3295 xfs_agblock_t agbno;
3298 xfs_agblock_t length;
3300 icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3301 if (icl->icl_type != XFS_LI_ICREATE) {
3302 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3306 if (icl->icl_size != 1) {
3307 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3311 agno = be32_to_cpu(icl->icl_ag);
3312 if (agno >= mp->m_sb.sb_agcount) {
3313 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3316 agbno = be32_to_cpu(icl->icl_agbno);
3317 if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3318 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3321 isize = be32_to_cpu(icl->icl_isize);
3322 if (isize != mp->m_sb.sb_inodesize) {
3323 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3326 count = be32_to_cpu(icl->icl_count);
3328 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3331 length = be32_to_cpu(icl->icl_length);
3332 if (!length || length >= mp->m_sb.sb_agblocks) {
3333 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3337 /* existing allocation is fixed value */
3338 ASSERT(count == XFS_IALLOC_INODES(mp));
3339 ASSERT(length == XFS_IALLOC_BLOCKS(mp));
3340 if (count != XFS_IALLOC_INODES(mp) ||
3341 length != XFS_IALLOC_BLOCKS(mp)) {
3342 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2");
3347 * Inode buffers can be freed. Do not replay the inode initialisation as
3348 * we could be overwriting something written after this inode buffer was
3351 * XXX: we need to iterate all buffers and only init those that are not
3352 * cancelled. I think that a more fine grained factoring of
3353 * xfs_ialloc_inode_init may be appropriate here to enable this to be done.
3356 if (xlog_check_buffer_cancelled(log,
3357 XFS_AGB_TO_DADDR(mp, agno, agbno), length, 0))
3360 xfs_ialloc_inode_init(mp, NULL, buffer_list, agno, agbno, length,
3361 be32_to_cpu(icl->icl_gen));
3366 * Free up any resources allocated by the transaction
3368 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
3371 xlog_recover_free_trans(
3372 struct xlog_recover *trans)
3374 xlog_recover_item_t *item, *n;
3377 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
3378 /* Free the regions in the item. */
3379 list_del(&item->ri_list);
3380 for (i = 0; i < item->ri_cnt; i++)
3381 kmem_free(item->ri_buf[i].i_addr);
3382 /* Free the item itself */
3383 kmem_free(item->ri_buf);
3386 /* Free the transaction recover structure */
3391 xlog_recover_buffer_ra_pass2(
3393 struct xlog_recover_item *item)
3395 struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
3396 struct xfs_mount *mp = log->l_mp;
3398 if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3399 buf_f->blf_len, buf_f->blf_flags)) {
3403 xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3404 buf_f->blf_len, NULL);
3408 xlog_recover_inode_ra_pass2(
3410 struct xlog_recover_item *item)
3412 struct xfs_inode_log_format ilf_buf;
3413 struct xfs_inode_log_format *ilfp;
3414 struct xfs_mount *mp = log->l_mp;
3417 if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3418 ilfp = item->ri_buf[0].i_addr;
3421 memset(ilfp, 0, sizeof(*ilfp));
3422 error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3427 if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3430 xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
3431 ilfp->ilf_len, &xfs_inode_buf_ra_ops);
3435 xlog_recover_dquot_ra_pass2(
3437 struct xlog_recover_item *item)
3439 struct xfs_mount *mp = log->l_mp;
3440 struct xfs_disk_dquot *recddq;
3441 struct xfs_dq_logformat *dq_f;
3445 if (mp->m_qflags == 0)
3448 recddq = item->ri_buf[1].i_addr;
3451 if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
3454 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3456 if (log->l_quotaoffs_flag & type)
3459 dq_f = item->ri_buf[0].i_addr;
3461 ASSERT(dq_f->qlf_len == 1);
3463 xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno,
3464 XFS_FSB_TO_BB(mp, dq_f->qlf_len), NULL);
3468 xlog_recover_ra_pass2(
3470 struct xlog_recover_item *item)
3472 switch (ITEM_TYPE(item)) {
3474 xlog_recover_buffer_ra_pass2(log, item);
3477 xlog_recover_inode_ra_pass2(log, item);
3480 xlog_recover_dquot_ra_pass2(log, item);
3484 case XFS_LI_QUOTAOFF:
3491 xlog_recover_commit_pass1(
3493 struct xlog_recover *trans,
3494 struct xlog_recover_item *item)
3496 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
3498 switch (ITEM_TYPE(item)) {
3500 return xlog_recover_buffer_pass1(log, item);
3501 case XFS_LI_QUOTAOFF:
3502 return xlog_recover_quotaoff_pass1(log, item);
3507 case XFS_LI_ICREATE:
3508 /* nothing to do in pass 1 */
3511 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3512 __func__, ITEM_TYPE(item));
3514 return XFS_ERROR(EIO);
3519 xlog_recover_commit_pass2(
3521 struct xlog_recover *trans,
3522 struct list_head *buffer_list,
3523 struct xlog_recover_item *item)
3525 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
3527 switch (ITEM_TYPE(item)) {
3529 return xlog_recover_buffer_pass2(log, buffer_list, item,
3532 return xlog_recover_inode_pass2(log, buffer_list, item,
3535 return xlog_recover_efi_pass2(log, item, trans->r_lsn);
3537 return xlog_recover_efd_pass2(log, item);
3539 return xlog_recover_dquot_pass2(log, buffer_list, item,
3541 case XFS_LI_ICREATE:
3542 return xlog_recover_do_icreate_pass2(log, buffer_list, item);
3543 case XFS_LI_QUOTAOFF:
3544 /* nothing to do in pass2 */
3547 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3548 __func__, ITEM_TYPE(item));
3550 return XFS_ERROR(EIO);
3555 xlog_recover_items_pass2(
3557 struct xlog_recover *trans,
3558 struct list_head *buffer_list,
3559 struct list_head *item_list)
3561 struct xlog_recover_item *item;
3564 list_for_each_entry(item, item_list, ri_list) {
3565 error = xlog_recover_commit_pass2(log, trans,
3575 * Perform the transaction.
3577 * If the transaction modifies a buffer or inode, do it now. Otherwise,
3578 * EFIs and EFDs get queued up by adding entries into the AIL for them.
3581 xlog_recover_commit_trans(
3583 struct xlog_recover *trans,
3588 int items_queued = 0;
3589 struct xlog_recover_item *item;
3590 struct xlog_recover_item *next;
3591 LIST_HEAD (buffer_list);
3592 LIST_HEAD (ra_list);
3593 LIST_HEAD (done_list);
3595 #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
3597 hlist_del(&trans->r_list);
3599 error = xlog_recover_reorder_trans(log, trans, pass);
3603 list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
3605 case XLOG_RECOVER_PASS1:
3606 error = xlog_recover_commit_pass1(log, trans, item);
3608 case XLOG_RECOVER_PASS2:
3609 xlog_recover_ra_pass2(log, item);
3610 list_move_tail(&item->ri_list, &ra_list);
3612 if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
3613 error = xlog_recover_items_pass2(log, trans,
3614 &buffer_list, &ra_list);
3615 list_splice_tail_init(&ra_list, &done_list);
3629 if (!list_empty(&ra_list)) {
3631 error = xlog_recover_items_pass2(log, trans,
3632 &buffer_list, &ra_list);
3633 list_splice_tail_init(&ra_list, &done_list);
3636 if (!list_empty(&done_list))
3637 list_splice_init(&done_list, &trans->r_itemq);
3639 xlog_recover_free_trans(trans);
3641 error2 = xfs_buf_delwri_submit(&buffer_list);
3642 return error ? error : error2;
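/*
 * Illustrative flow (not in the original): pass 2 items are first
 * queued on ra_list so their buffers can be read ahead, then replayed
 * in batches of XLOG_RECOVER_COMMIT_QUEUE_MAX (100) items via
 * xlog_recover_items_pass2(), with all modified buffers gathered on
 * buffer_list and written once at the end by xfs_buf_delwri_submit().
 */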
3646 xlog_recover_unmount_trans(
3648 struct xlog_recover *trans)
3650 /* Do nothing now */
3651 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
3656 * There are two valid states of the r_state field. 0 indicates that the
3657 * transaction structure is in a normal state. We have either seen the
3658 * start of the transaction or the last operation we added was not a partial
3659 * operation. If the last operation we added to the transaction was a
3660 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
3662 * NOTE: skip LRs with 0 data length.
3665 xlog_recover_process_data(
3667 struct hlist_head rhash[],
3668 struct xlog_rec_header *rhead,
3674 xlog_op_header_t *ohead;
3675 xlog_recover_t *trans;
3681 lp = dp + be32_to_cpu(rhead->h_len);
3682 num_logops = be32_to_cpu(rhead->h_num_logops);
3684 /* check the log format matches our own - else we can't recover */
3685 if (xlog_header_check_recover(log->l_mp, rhead))
3686 return (XFS_ERROR(EIO));
3688 while ((dp < lp) && num_logops) {
3689 ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
3690 ohead = (xlog_op_header_t *)dp;
3691 dp += sizeof(xlog_op_header_t);
3692 if (ohead->oh_clientid != XFS_TRANSACTION &&
3693 ohead->oh_clientid != XFS_LOG) {
3694 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
3695 __func__, ohead->oh_clientid);
3697 return (XFS_ERROR(EIO));
3699 tid = be32_to_cpu(ohead->oh_tid);
3700 hash = XLOG_RHASH(tid);
3701 trans = xlog_recover_find_tid(&rhash[hash], tid);
3702 if (trans == NULL) { /* not found; add new tid */
3703 if (ohead->oh_flags & XLOG_START_TRANS)
3704 xlog_recover_new_tid(&rhash[hash], tid,
3705 be64_to_cpu(rhead->h_lsn));
3707 if (dp + be32_to_cpu(ohead->oh_len) > lp) {
3708 xfs_warn(log->l_mp, "%s: bad length 0x%x",
3709 __func__, be32_to_cpu(ohead->oh_len));
3711 return (XFS_ERROR(EIO));
3713 flags = ohead->oh_flags & ~XLOG_END_TRANS;
3714 if (flags & XLOG_WAS_CONT_TRANS)
3715 flags &= ~XLOG_CONTINUE_TRANS;
3717 case XLOG_COMMIT_TRANS:
3718 error = xlog_recover_commit_trans(log,
3721 case XLOG_UNMOUNT_TRANS:
3722 error = xlog_recover_unmount_trans(log, trans);
3724 case XLOG_WAS_CONT_TRANS:
3725 error = xlog_recover_add_to_cont_trans(log,
3727 be32_to_cpu(ohead->oh_len));
3729 case XLOG_START_TRANS:
3730 xfs_warn(log->l_mp, "%s: bad transaction",
3733 error = XFS_ERROR(EIO);
3736 case XLOG_CONTINUE_TRANS:
3737 error = xlog_recover_add_to_trans(log, trans,
3738 dp, be32_to_cpu(ohead->oh_len));
3741 xfs_warn(log->l_mp, "%s: bad flag 0x%x",
3744 error = XFS_ERROR(EIO);
3750 dp += be32_to_cpu(ohead->oh_len);
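/*
 * Example record layout (illustrative): a log record with
 * h_num_logops == 3 is consumed above as
 *
 *	[xlog_op_header_t][oh_len bytes][xlog_op_header_t][oh_len bytes]...
 *
 * with dp advanced past each header and payload in turn until it
 * reaches lp.
 */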
3757 * Process an extent free intent item that was recovered from
3758 * the log. We need to free the extents that it describes.
3761 xlog_recover_process_efi(
3763 xfs_efi_log_item_t *efip)
3765 xfs_efd_log_item_t *efdp;
3770 xfs_fsblock_t startblock_fsb;
3772 ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
3775 * First check the validity of the extents described by the
3776 * EFI. If any are bad, then assume that all are bad and
3777 * just toss the EFI.
3779 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3780 extp = &(efip->efi_format.efi_extents[i]);
3781 startblock_fsb = XFS_BB_TO_FSB(mp,
3782 XFS_FSB_TO_DADDR(mp, extp->ext_start));
3783 if ((startblock_fsb == 0) ||
3784 (extp->ext_len == 0) ||
3785 (startblock_fsb >= mp->m_sb.sb_dblocks) ||
3786 (extp->ext_len >= mp->m_sb.sb_agblocks)) {
3788 * This will pull the EFI from the AIL and
3789 * free the memory associated with it.
3791 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3792 xfs_efi_release(efip, efip->efi_format.efi_nextents);
3793 return XFS_ERROR(EIO);
3797 tp = xfs_trans_alloc(mp, 0);
3798 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
3801 efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
3803 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3804 extp = &(efip->efi_format.efi_extents[i]);
3805 error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
3808 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
3812 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3813 error = xfs_trans_commit(tp, 0);
3817 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3822 * When this is called, all of the EFIs which did not have
3823 * corresponding EFDs should be in the AIL. What we do now
3824 * is free the extents associated with each one.
3826 * Since we process the EFIs in normal transactions, they
3827 * will be removed at some point after the commit. This prevents
3828 * us from just walking down the list processing each one.
3829 * We'll use a flag in the EFI to skip those that we've already
3830 * processed and use the AIL iteration mechanism's generation
3831 * count to try to speed this up at least a bit.
3833 * When we start, we know that the EFIs are the only things in
3834 * the AIL. As we process them, however, other items are added
3835 * to the AIL. Since everything added to the AIL must come after
3836 * everything already in the AIL, we stop processing as soon as
3837 * we see something other than an EFI in the AIL.
3840 xlog_recover_process_efis(
3843 xfs_log_item_t *lip;
3844 xfs_efi_log_item_t *efip;
3846 struct xfs_ail_cursor cur;
3847 struct xfs_ail *ailp;
3850 spin_lock(&ailp->xa_lock);
3851 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3852 while (lip != NULL) {
3854 * We're done when we see something other than an EFI.
3855 * There should be no EFIs left in the AIL now.
3857 if (lip->li_type != XFS_LI_EFI) {
3859 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
3860 ASSERT(lip->li_type != XFS_LI_EFI);
3866 * Skip EFIs that we've already processed.
3868 efip = (xfs_efi_log_item_t *)lip;
3869 if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
3870 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3874 spin_unlock(&ailp->xa_lock);
3875 error = xlog_recover_process_efi(log->l_mp, efip);
3876 spin_lock(&ailp->xa_lock);
3879 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3882 xfs_trans_ail_cursor_done(ailp, &cur);
3883 spin_unlock(&ailp->xa_lock);
3888 * This routine performs a transaction to null out a bad inode pointer
3889 * in an agi unlinked inode hash bucket.
3892 xlog_recover_clear_agi_bucket(
3894 xfs_agnumber_t agno,
3903 tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3904 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_clearagi, 0, 0);
3908 error = xfs_read_agi(mp, tp, agno, &agibp);
3912 agi = XFS_BUF_TO_AGI(agibp);
3913 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3914 offset = offsetof(xfs_agi_t, agi_unlinked) +
3915 (sizeof(xfs_agino_t) * bucket);
3916 xfs_trans_log_buf(tp, agibp, offset,
3917 (offset + sizeof(xfs_agino_t) - 1));
3919 error = xfs_trans_commit(tp, 0);
3925 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3927 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
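/*
 * Worked example (not from the original): agi_unlinked[] entries are
 * xfs_agino_t (4 bytes each), so clearing bucket 3 logs the 4 bytes
 * from base + 12 to base + 15, where base is
 * offsetof(xfs_agi_t, agi_unlinked).
 */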
3932 xlog_recover_process_one_iunlink(
3933 struct xfs_mount *mp,
3934 xfs_agnumber_t agno,
3938 struct xfs_buf *ibp;
3939 struct xfs_dinode *dip;
3940 struct xfs_inode *ip;
3944 ino = XFS_AGINO_TO_INO(mp, agno, agino);
3945 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
3950 * Get the on disk inode to find the next inode in the bucket.
3952 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
3956 ASSERT(ip->i_d.di_nlink == 0);
3957 ASSERT(ip->i_d.di_mode != 0);
3959 /* setup for the next pass */
3960 agino = be32_to_cpu(dip->di_next_unlinked);
3964 * Prevent any DMAPI event from being sent when the reference on
3965 * the inode is dropped.
3967 ip->i_d.di_dmevmask = 0;
3976 * We can't read in the inode this bucket points to, or this inode
3977 * is messed up. Just ditch this bucket of inodes. We will lose
3978 * some inodes and space, but at least we won't hang.
3980 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3981 * clear the inode pointer in the bucket.
3983 xlog_recover_clear_agi_bucket(mp, agno, bucket);
3988 * xlog_iunlink_recover
3990 * This is called during recovery to process any inodes which
3991 * we unlinked but not freed when the system crashed. These
3992 * inodes will be on the lists in the AGI blocks. What we do
3993 * here is scan all the AGIs and fully truncate and free any
3994 * inodes found on the lists. Each inode is removed from the
3995 * lists when it has been fully truncated and is freed. The
3996 * freeing of the inode and its removal from the list must be atomic.
4000 xlog_recover_process_iunlinks(
4004 xfs_agnumber_t agno;
4015 * Prevent any DMAPI event from being sent while in this function.
4017 mp_dmevmask = mp->m_dmevmask;
4020 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
4022 * Find the agi for this ag.
4024 error = xfs_read_agi(mp, NULL, agno, &agibp);
4027 * AGI is b0rked. Don't process it.
4029 * We should probably mark the filesystem as corrupt
4030 * after we've recovered all the ag's we can....
4035 * Unlock the buffer so that it can be acquired in the normal
4036 * course of the transaction to truncate and free each inode.
4037 * Because we are not racing with anyone else here for the AGI
4038 * buffer, we don't even need to hold it locked to read the
4039 * initial unlinked bucket entries out of the buffer. We keep a
4040 * buffer reference though, so that it stays pinned in memory
4041 * while we need the buffer.
4043 agi = XFS_BUF_TO_AGI(agibp);
4044 xfs_buf_unlock(agibp);
4046 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
4047 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
4048 while (agino != NULLAGINO) {
4049 agino = xlog_recover_process_one_iunlink(mp,
4050 agno, agino, bucket);
4053 xfs_buf_rele(agibp);
4056 mp->m_dmevmask = mp_dmevmask;
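/*
 * Illustrative walk (not in the original source): each AGI holds
 * XFS_AGI_UNLINKED_BUCKETS singly linked lists; agi_unlinked[bucket]
 * names the first inode and each on-disk inode's di_next_unlinked
 * names the next, so the loop above follows each chain until it reads
 * NULLAGINO.
 */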
4060 * Unpack the log buffer data and CRC check it. If the check fails, issue a
4061 * warning if and only if the CRC in the header is non-zero. This makes the
4062 * check an advisory warning, and the zero CRC check will prevent failure
4063 * warnings from being emitted when upgrading the kernel from one that does not
4064 * add CRCs by default.
4066 * When filesystems are CRC enabled, this CRC mismatch becomes a fatal log
4067 * corruption failure.
4070 xlog_unpack_data_crc(
4071 struct xlog_rec_header *rhead,
4077 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
4078 if (crc != rhead->h_crc) {
4079 if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
4080 xfs_alert(log->l_mp,
4081 "log record CRC mismatch: found 0x%x, expected 0x%x.",
4082 le32_to_cpu(rhead->h_crc),
4084 xfs_hex_dump(dp, 32);
4088 * If we've detected a log record corruption, then we can't
4089 * recover past this point. Abort recovery if we are enforcing
4090 * CRC protection by punting an error back up the stack.
4092 if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
4093 return EFSCORRUPTED;
4101 struct xlog_rec_header *rhead,
4108 error = xlog_unpack_data_crc(rhead, dp, log);
4112 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
4113 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
4114 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
4118 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
4119 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
4120 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
4121 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
4122 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
4123 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
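/*
 * Worked example (illustrative): when a record is written, the cycle
 * number is stamped over the first __be32 of every basic block and the
 * original words are saved in h_cycle_data[] (and, for records longer
 * than XLOG_HEADER_CYCLE_SIZE == 32k, in extended headers holding 64
 * entries each). The loops above simply put those words back.
 */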
4132 xlog_valid_rec_header(
4134 struct xlog_rec_header *rhead,
4139 if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
4140 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
4141 XFS_ERRLEVEL_LOW, log->l_mp);
4142 return XFS_ERROR(EFSCORRUPTED);
4145 (!rhead->h_version ||
4146 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
4147 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
4148 __func__, be32_to_cpu(rhead->h_version));
4149 return XFS_ERROR(EIO);
4152 /* LR body must have data or it wouldn't have been written */
4153 hlen = be32_to_cpu(rhead->h_len);
4154 if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
4155 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
4156 XFS_ERRLEVEL_LOW, log->l_mp);
4157 return XFS_ERROR(EFSCORRUPTED);
4159 if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
4160 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
4161 XFS_ERRLEVEL_LOW, log->l_mp);
4162 return XFS_ERROR(EFSCORRUPTED);
4168 * Read the log from tail to head and process the log records found.
4169 * Handle the two cases where the tail and head are in the same cycle
4170 * and where the active portion of the log wraps around the end of
4171 * the physical log separately. The pass parameter is passed through
4172 * to the routines called to process the data and is not looked at
4176 xlog_do_recovery_pass(
4178 xfs_daddr_t head_blk,
4179 xfs_daddr_t tail_blk,
4182 xlog_rec_header_t *rhead;
4185 xfs_buf_t *hbp, *dbp;
4186 int error = 0, h_size;
4187 int bblks, split_bblks;
4188 int hblks, split_hblks, wrapped_hblks;
4189 struct hlist_head rhash[XLOG_RHASH_SIZE];
4191 ASSERT(head_blk != tail_blk);
4194 * Read the header of the tail block and get the iclog buffer size from
4195 * h_size. Use this to tell how many sectors make up the log header.
4197 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
4199 * When using variable length iclogs, read first sector of
4200 * iclog header and extract the header size from it. Get a
4201 * new hbp that is the correct size.
4203 hbp = xlog_get_bp(log, 1);
4207 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
4211 rhead = (xlog_rec_header_t *)offset;
4212 error = xlog_valid_rec_header(log, rhead, tail_blk);
4215 h_size = be32_to_cpu(rhead->h_size);
4216 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
4217 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
4218 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
4219 if (h_size % XLOG_HEADER_CYCLE_SIZE)
4222 hbp = xlog_get_bp(log, hblks);
4227 ASSERT(log->l_sectBBsize == 1);
4229 hbp = xlog_get_bp(log, 1);
4230 h_size = XLOG_BIG_RECORD_BSIZE;
4235 dbp = xlog_get_bp(log, BTOBB(h_size));
4241 memset(rhash, 0, sizeof(rhash));
4242 if (tail_blk <= head_blk) {
4243 for (blk_no = tail_blk; blk_no < head_blk; ) {
4244 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
4248 rhead = (xlog_rec_header_t *)offset;
4249 error = xlog_valid_rec_header(log, rhead, blk_no);
4253 /* blocks in data section */
4254 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4255 error = xlog_bread(log, blk_no + hblks, bblks, dbp,
4260 error = xlog_unpack_data(rhead, offset, log);
4264 error = xlog_recover_process_data(log,
4265 rhash, rhead, offset, pass);
4268 blk_no += bblks + hblks;
4272 * Perform recovery around the end of the physical log.
4273 * When the head is not on the same cycle number as the tail,
4274 * we can't do a sequential recovery as above.
4277 while (blk_no < log->l_logBBsize) {
4279 * Check for header wrapping around physical end-of-log
4281 offset = hbp->b_addr;
4284 if (blk_no + hblks <= log->l_logBBsize) {
4285 /* Read header in one read */
4286 error = xlog_bread(log, blk_no, hblks, hbp,
4291 /* This LR is split across physical log end */
4292 if (blk_no != log->l_logBBsize) {
4293 /* some data before physical log end */
4294 ASSERT(blk_no <= INT_MAX);
4295 split_hblks = log->l_logBBsize - (int)blk_no;
4296 ASSERT(split_hblks > 0);
4297 error = xlog_bread(log, blk_no,
4305 * Note: this black magic still works with
4306 * large sector sizes (non-512) only because:
4307 * - we increased the buffer size originally
4308 * by 1 sector giving us enough extra space
4309 * for the second read;
4310 * - the log start is guaranteed to be sector aligned;
4312 * - we read the log end (LR header start)
4313 * _first_, then the log start (LR header end)
4314 * - order is important.
4316 wrapped_hblks = hblks - split_hblks;
4317 error = xlog_bread_offset(log, 0,
4319 offset + BBTOB(split_hblks));
4323 rhead = (xlog_rec_header_t *)offset;
4324 error = xlog_valid_rec_header(log, rhead,
4325 split_hblks ? blk_no : 0);
4329 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4332 /* Read in data for log record */
4333 if (blk_no + bblks <= log->l_logBBsize) {
4334 error = xlog_bread(log, blk_no, bblks, dbp,
4339 /* This log record is split across the
4340 * physical end of log */
4341 offset = dbp->b_addr;
4343 if (blk_no != log->l_logBBsize) {
4344 /* some data is before the physical
4346 ASSERT(!wrapped_hblks);
4347 ASSERT(blk_no <= INT_MAX);
4349 log->l_logBBsize - (int)blk_no;
4350 ASSERT(split_bblks > 0);
4351 error = xlog_bread(log, blk_no,
4359 * Note: this black magic still works with
4360 * large sector sizes (non-512) only because:
4361 * - we increased the buffer size originally
4362 * by 1 sector giving us enough extra space
4363 * for the second read;
4364 * - the log start is guaranteed to be sector aligned;
4366 * - we read the log end (LR header start)
4367 * _first_, then the log start (LR header end)
4368 * - order is important.
4370 error = xlog_bread_offset(log, 0,
4371 bblks - split_bblks, dbp,
4372 offset + BBTOB(split_bblks));
4377 error = xlog_unpack_data(rhead, offset, log);
4381 error = xlog_recover_process_data(log, rhash,
4382 rhead, offset, pass);
4388 ASSERT(blk_no >= log->l_logBBsize);
4389 blk_no -= log->l_logBBsize;
4391 /* read first part of physical log */
4392 while (blk_no < head_blk) {
4393 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
4397 rhead = (xlog_rec_header_t *)offset;
4398 error = xlog_valid_rec_header(log, rhead, blk_no);
4402 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4403 error = xlog_bread(log, blk_no+hblks, bblks, dbp,
4408 error = xlog_unpack_data(rhead, offset, log);
4412 error = xlog_recover_process_data(log, rhash,
4413 rhead, offset, pass);
4416 blk_no += bblks + hblks;
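/*
 * Worked example (not from the original): a record whose header says
 * h_len == 7680 bytes occupies BTOBB(7680) == 15 data blocks, so with
 * a single-block header the cursor above advances 16 basic blocks per
 * record.
 */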
4428 * Do the recovery of the log. We actually do this in two phases.
4429 * The two passes are necessary in order to implement the function
4430 * of cancelling a record written into the log. The first pass
4431 * determines those things which have been cancelled, and the
4432 * second pass replays log items normally except for those which
4433 * have been cancelled. The handling of the replay and cancellations
4434 * takes place in the log item type specific routines.
4436 * The table of items which have cancel records in the log is allocated
4437 * and freed at this level, since only here do we know when all of
4438 * the log recovery has been completed.
4441 xlog_do_log_recovery(
4443 xfs_daddr_t head_blk,
4444 xfs_daddr_t tail_blk)
4448 ASSERT(head_blk != tail_blk);
4451 * First do a pass to find all of the cancelled buf log items.
4452 * Store them in the buf_cancel_table for use in the second pass.
4454 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
4455 sizeof(struct list_head),
4457 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4458 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
4460 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4461 XLOG_RECOVER_PASS1);
4463 kmem_free(log->l_buf_cancel_table);
4464 log->l_buf_cancel_table = NULL;
4468 * Then do a second pass to actually recover the items in the log.
4469 * When it is complete free the table of buf cancel items.
4471 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4472 XLOG_RECOVER_PASS2);
4477 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4478 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
4482 kmem_free(log->l_buf_cancel_table);
4483 log->l_buf_cancel_table = NULL;
4489 * Do the actual recovery
4494 xfs_daddr_t head_blk,
4495 xfs_daddr_t tail_blk)
4502 * First replay the images in the log.
4504 error = xlog_do_log_recovery(log, head_blk, tail_blk);
4509 * If IO errors happened during recovery, bail out.
4511 if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
4516 * We now update the tail_lsn since much of the recovery has completed
4517 * and there may be space available to use. If there were no extent
4518 * or iunlinks, we can free up the entire log and set the tail_lsn to
4519 * be the last_sync_lsn. This was set in xlog_find_tail to be the
4520 * lsn of the last known good LR on disk. If there are extent frees
4521 * or iunlinks they will have some entries in the AIL; so we look at
4522 * the AIL to determine how to set the tail_lsn.
4524 xlog_assign_tail_lsn(log->l_mp);
4527 * Now that we've finished replaying all buffer and inode
4528 * updates, re-read in the superblock and reverify it.
4530 bp = xfs_getsb(log->l_mp, 0);
4532 ASSERT(!(XFS_BUF_ISWRITE(bp)));
4534 XFS_BUF_UNASYNC(bp);
4535 bp->b_ops = &xfs_sb_buf_ops;
4536 xfsbdstrat(log->l_mp, bp);
4537 error = xfs_buf_iowait(bp);
4539 xfs_buf_ioerror_alert(bp, __func__);
4545 /* Convert superblock from on-disk format */
4546 sbp = &log->l_mp->m_sb;
4547 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
4548 ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
4549 ASSERT(xfs_sb_good_version(sbp));
4552 /* We've re-read the superblock so re-initialize per-cpu counters */
4553 xfs_icsb_reinit_counters(log->l_mp);
4555 xlog_recover_check_summary(log);
4557 /* Normal transactions can now occur */
4558 log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
4563 * Perform recovery and re-initialize some log variables in xlog_find_tail.
4565 * Return error or zero.
4571 xfs_daddr_t head_blk, tail_blk;
4574 /* find the tail of the log */
4575 if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
4578 if (tail_blk != head_blk) {
4579 /* There used to be a comment here:
4581 * disallow recovery on read-only mounts. note -- mount
4582 * checks for ENOSPC and turns it into an intelligent error message.
4584 * ...but this is no longer true. Now, unless you specify
4585 * NORECOVERY (in which case this function would never be
4586 * called), we just go ahead and recover. We do this all
4587 * under the vfs layer, so we can get away with it unless
4588 * the device itself is read-only, in which case we fail.
4590 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
4595 * Version 5 superblock log feature mask validation. We know the
4596 * log is dirty so check if there are any unknown log features
4597 * in what we need to recover. If there are unknown features
4598 * (e.g. unsupported transactions, then simply reject the
4599 * attempt at recovery before touching anything.
4601 if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
4602 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
4603 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
4605 "Superblock has unknown incompatible log features (0x%x) enabled.\n"
4606 "The log can not be fully and/or safely recovered by this kernel.\n"
4607 "Please recover the log on a kernel that supports the unknown features.",
4608 (log->l_mp->m_sb.sb_features_log_incompat &
4609 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
4613 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
4614 log->l_mp->m_logname ? log->l_mp->m_logname
4617 error = xlog_do_recover(log, head_blk, tail_blk);
4618 log->l_flags |= XLOG_RECOVERY_NEEDED;
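
/*
 * A minimal sketch (not part of the original file) of the v5 feature
 * gate used above.  It assumes only that sb_features_log_incompat is
 * the on-disk incompat log feature bitmask; the helper name is
 * hypothetical and mirrors what xfs_sb_has_incompat_log_feature()
 * tests.
 */
static inline bool
xlog_example_log_has_unknown_features(
	struct xfs_sb	*sbp)
{
	/* any bit outside the known mask means "do not attempt recovery" */
	return (sbp->sb_features_log_incompat &
		XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN) != 0;
}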
/*
 * In the first part of recovery we replay inodes and buffers and build
 * up the list of extent free items which need to be processed.  Here
 * we process the extent free items and clean up the on-disk unlinked
 * inode lists.  This is separated from the first part of recovery so
 * that the root and real-time bitmap inodes can be read in from disk in
 * between the two stages.  This is necessary so that we can free space
 * in the real-time portion of the file system.
 */
int
xlog_recover_finish(
	struct xlog	*log)
{
	/*
	 * Now we're ready to do the transactions needed for the
	 * rest of recovery.  Start with completing all the extent
	 * free intent records and then process the unlinked inode
	 * lists.  At this point, we essentially run in normal mode
	 * except that we're still performing recovery actions
	 * rather than accepting new requests.
	 */
	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
		int	error;

		error = xlog_recover_process_efis(log);
		if (error) {
			xfs_alert(log->l_mp, "Failed to recover EFIs");
			return error;
		}

		/*
		 * Sync the log to get all the EFIs out of the AIL.
		 * This isn't absolutely necessary, but it helps in
		 * case the unlink transactions would have problems
		 * pushing the EFIs out of the way.
		 */
		xfs_log_force(log->l_mp, XFS_LOG_SYNC);

		xlog_recover_process_iunlinks(log);

		xlog_recover_check_summary(log);

		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");
		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
	} else {
		xfs_info(log->l_mp, "Ending clean mount");
	}
	return 0;
}
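
/*
 * A minimal sketch (not part of the original file) of how a mount path
 * might drive the two recovery stages described above.  The function
 * name is hypothetical; in practice xlog_recover() is called from the
 * log mount path and xlog_recover_finish() from the mount-finish path,
 * with the root and realtime bitmap inodes read in between.
 */
static inline int
xlog_example_mount_recovery(
	struct xlog	*log)
{
	int		error;

	/* stage 1: replay buffer/inode changes, note EFIs and iunlinks */
	error = xlog_recover(log);
	if (error)
		return error;

	/* ...caller reads the root and realtime bitmap inodes here... */

	/* stage 2: finish EFIs and clean up unlinked inode lists */
	return xlog_recover_finish(log);
}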
#if defined(DEBUG)
/*
 * Read all of the agf and agi counters and check that they
 * are consistent with the superblock counters.
 */
STATIC void
xlog_recover_check_summary(
	struct xlog	*log)
{
	xfs_mount_t	*mp = log->l_mp;
	xfs_agf_t	*agfp;
	xfs_buf_t	*agfbp, *agibp;
	xfs_agnumber_t	agno;
	__uint64_t	freeblks = 0, itotal = 0, ifree = 0;
	int		error;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
		if (error) {
			xfs_alert(mp, "%s agf read failed agno %d error %d",
						__func__, agno, error);
		} else {
			agfp = XFS_BUF_TO_AGF(agfbp);
			freeblks += be32_to_cpu(agfp->agf_freeblks) +
				    be32_to_cpu(agfp->agf_flcount);
			xfs_buf_relse(agfbp);
		}

		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			xfs_alert(mp, "%s agi read failed agno %d error %d",
						__func__, agno, error);
		} else {
			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);

			itotal += be32_to_cpu(agi->agi_count);
			ifree += be32_to_cpu(agi->agi_freecount);
			xfs_buf_relse(agibp);
		}
	}
}
#endif	/* DEBUG */
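
/*
 * A minimal sketch (not part of the original file): the debug helper
 * above only accumulates the per-AG counters.  A comparison of the
 * kind its comment describes might look like this; treating the sums
 * as directly comparable to sb_fdblocks/sb_icount/sb_ifree is an
 * assumption for illustration, and the helper name is hypothetical.
 */
static inline void
xlog_example_compare_summary(
	struct xfs_mount	*mp,
	__uint64_t		freeblks,
	__uint64_t		itotal,
	__uint64_t		ifree)
{
	if (freeblks != mp->m_sb.sb_fdblocks)
		xfs_warn(mp, "free block mismatch: agf sum %llu, sb %llu",
			 (unsigned long long)freeblks,
			 (unsigned long long)mp->m_sb.sb_fdblocks);
	if (itotal != mp->m_sb.sb_icount || ifree != mp->m_sb.sb_ifree)
		xfs_warn(mp, "inode count mismatch between agi sums and sb");
}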