Merge branch 'linux-2.6'

diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index ca74d3f5910e75cd4810a083651c7073b71ddb3b..dacb19739cc2830ae561a811321362e0fbf2bd6d 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -927,6 +927,14 @@ xlog_find_tail(
                        ASSIGN_ANY_LSN_HOST(log->l_last_sync_lsn, log->l_curr_cycle,
                                        after_umount_blk);
                        *tail_blk = after_umount_blk;
+
+                       /*
+                        * Note that the unmount was clean. If the unmount
+                        * was not clean, we need to know this so we can
+                        * rebuild the superblock counters from the perag
+                        * headers on filesystems using non-persistent counters.
+                        */
+                       log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
                }
        }
 
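For context, a minimal sketch of how a mount path might consult the new XFS_MOUNT_WAS_CLEAN flag after recovery; the helper names here (xfs_maybe_rebuild_counters, xfs_rebuild_sb_counters) are illustrative assumptions, not code from this patch:

/*
 * Illustrative sketch only.  If log recovery did not find a clean
 * unmount record and the filesystem keeps non-persistent superblock
 * counters, recompute them from the per-AG (perag) headers.
 * xfs_rebuild_sb_counters() is a hypothetical helper.
 */
STATIC int
xfs_maybe_rebuild_counters(
	xfs_mount_t	*mp)
{
	if (mp->m_flags & XFS_MOUNT_WAS_CLEAN)
		return 0;	/* clean unmount: on-disk counters are trusted */
	return xfs_rebuild_sb_counters(mp);	/* hypothetical perag scan */
}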
@@ -1358,7 +1366,7 @@ xlog_recover_add_to_cont_trans(
        int                     old_len;
 
        item = trans->r_itemq;
-       if (item == 0) {
+       if (item == NULL) {
                /* finish copying rest of trans header */
                xlog_recover_add_item(&trans->r_itemq);
                ptr = (xfs_caddr_t) &trans->r_theader +
@@ -1404,7 +1412,7 @@ xlog_recover_add_to_trans(
        if (!len)
                return 0;
        item = trans->r_itemq;
-       if (item == 0) {
+       if (item == NULL) {
                ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC);
                if (len == sizeof(xfs_trans_header_t))
                        xlog_recover_add_item(&trans->r_itemq);
@@ -1459,12 +1467,12 @@ xlog_recover_unlink_tid(
        xlog_recover_t          *tp;
        int                     found = 0;
 
-       ASSERT(trans != 0);
+       ASSERT(trans != NULL);
        if (trans == *q) {
                *q = (*q)->r_next;
        } else {
                tp = *q;
-               while (tp != 0) {
+               while (tp) {
                        if (tp->r_next == trans) {
                                found = 1;
                                break;
@@ -1487,7 +1495,7 @@ xlog_recover_insert_item_backq(
        xlog_recover_item_t     **q,
        xlog_recover_item_t     *item)
 {
-       if (*q == 0) {
+       if (*q == NULL) {
                item->ri_prev = item->ri_next = item;
                *q = item;
        } else {
@@ -1509,7 +1517,6 @@ xlog_recover_insert_item_frontq(
 
 STATIC int
 xlog_recover_reorder_trans(
-       xlog_t                  *log,
        xlog_recover_t          *trans)
 {
        xlog_recover_item_t     *first_item, *itemq, *itemq_next;
@@ -1878,6 +1885,50 @@ xlog_recover_do_reg_buffer(
        unsigned int            *data_map = NULL;
        unsigned int            map_size = 0;
        int                     error;
+       int                     stale_buf = 1;
+
+       /*
+        * Scan through the on-disk inode buffer and attempt to
+        * determine if it has been written to since it was logged.
+        *
+        * - If any of the magic numbers are incorrect then the buffer is stale
+        * - If any of the modes are non-zero then the buffer is not stale
+        * - If all of the modes are zero and at least one of the generation
+        *   counts is non-zero then the buffer is stale
+        *
+        * If the end result is a stale buffer then the log buffer is replayed,
+        * otherwise it is skipped.
+        *
+        * This heuristic is not perfect.  It can be improved by scanning the
+        * entire inode chunk for evidence that any of the inode clusters have
+        * been updated.  To fix this problem completely we will need a major
+        * architectural change to the logging system.
+        */
+       if (buf_f->blf_flags & XFS_BLI_INODE_NEW_BUF) {
+               xfs_dinode_t    *dip;
+               int             inodes_per_buf;
+               int             mode_count = 0;
+               int             gen_count = 0;
+
+               stale_buf = 0;
+               inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
+               for (i = 0; i < inodes_per_buf; i++) {
+                       dip = (xfs_dinode_t *)xfs_buf_offset(bp,
+                               i * mp->m_sb.sb_inodesize);
+                       if (be16_to_cpu(dip->di_core.di_magic) !=
+                                       XFS_DINODE_MAGIC) {
+                               stale_buf = 1;
+                               break;
+                       }
+                       if (be16_to_cpu(dip->di_core.di_mode))
+                               mode_count++;
+                       if (be32_to_cpu(dip->di_core.di_gen))
+                               gen_count++;
+               }
+
+               if (!mode_count && gen_count)
+                       stale_buf = 1;
+       }
 
        switch (buf_f->blf_type) {
        case XFS_LI_BUF:
@@ -1893,7 +1944,7 @@ xlog_recover_do_reg_buffer(
                        break;
                nbits = xfs_contig_bits(data_map, map_size, bit);
                ASSERT(nbits > 0);
-               ASSERT(item->ri_buf[i].i_addr != 0);
+               ASSERT(item->ri_buf[i].i_addr != NULL);
                ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0);
                ASSERT(XFS_BUF_COUNT(bp) >=
                       ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT));
@@ -1911,7 +1962,7 @@ xlog_recover_do_reg_buffer(
                                               -1, 0, XFS_QMOPT_DOWARN,
                                               "dquot_buf_recover");
                }
-               if (!error)
+               if (!error && stale_buf)
                        memcpy(xfs_buf_offset(bp,
                                (uint)bit << XFS_BLI_SHIFT),    /* dest */
                                item->ri_buf[i].i_addr,         /* source */
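For clarity, the heuristic introduced above (the stale_buf scan plus the guarded memcpy) can be restated as a small pure function; this is illustrative only and not part of the patch, and the function name is made up:

/*
 * Illustrative restatement of the stale-buffer heuristic in
 * xlog_recover_do_reg_buffer().  Returns non-zero when the on-disk
 * inode buffer should be considered stale and overwritten by replay.
 */
static int
inode_buf_is_stale(
	int	bad_magic,	/* any inode magic number was incorrect */
	int	mode_count,	/* inodes with a non-zero di_mode */
	int	gen_count)	/* inodes with a non-zero di_gen */
{
	if (bad_magic)
		return 1;	/* buffer never initialised: replay */
	if (mode_count)
		return 0;	/* inodes in use on disk: do not replay */
	return gen_count != 0;	/* all modes zero, a gen set: replay */
}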
@@ -2765,7 +2816,7 @@ xlog_recover_do_trans(
        int                     error = 0;
        xlog_recover_item_t     *item, *first_item;
 
-       if ((error = xlog_recover_reorder_trans(log, trans)))
+       if ((error = xlog_recover_reorder_trans(trans)))
                return error;
        first_item = item = trans->r_itemq;
        do {
@@ -3016,7 +3067,7 @@ xlog_recover_process_efi(
        }
 
        efip->efi_flags |= XFS_EFI_RECOVERED;
-       xfs_trans_commit(tp, 0, NULL);
+       xfs_trans_commit(tp, 0);
 }
 
 /*
@@ -3143,7 +3194,7 @@ xlog_recover_clear_agi_bucket(
        xfs_trans_log_buf(tp, agibp, offset,
                          (offset + sizeof(xfs_agino_t) - 1));
 
-       (void) xfs_trans_commit(tp, 0, NULL);
+       (void) xfs_trans_commit(tp, 0);
 }
 
 /*
@@ -3886,8 +3937,7 @@ xlog_recover(
                 * under the vfs layer, so we can get away with it unless
                 * the device itself is read-only, in which case we fail.
                 */
-               if ((error = xfs_dev_is_read_only(log->l_mp,
-                                               "recovery required"))) {
+               if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
                        return error;
                }