]> git.karo-electronics.de Git - mv-sheeva.git/blobdiff - fs/xfs/xfs_mount.c
Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6
[mv-sheeva.git] / fs / xfs / xfs_mount.c
index 37c612ce3d051d33b8f32fa185d8ce6477a6c5e1..3bed0cf0d8afa2eaa87a98b00be0a10034c701b7 100644 (file)
@@ -52,11 +52,11 @@ STATIC void xfs_unmountfs_wait(xfs_mount_t *);
 
 #ifdef HAVE_PERCPU_SB
 STATIC void    xfs_icsb_destroy_counters(xfs_mount_t *);
-STATIC void    xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, int,
-int);
+STATIC void    xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
+                                               int, int);
 STATIC void    xfs_icsb_sync_counters(xfs_mount_t *);
 STATIC int     xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
-                                               int, int);
+                                               int64_t, int);
 STATIC int     xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
 
 #else
@@ -543,11 +543,8 @@ xfs_readsb(xfs_mount_t *mp, int flags)
                ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
        }
 
-       mutex_lock(&mp->m_icsb_mutex);
-       xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
-       xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
-       xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
-       mutex_unlock(&mp->m_icsb_mutex);
+       /* Initialize per-cpu counters */
+       xfs_icsb_reinit_counters(mp);
 
        mp->m_sb_bp = bp;
        xfs_buf_relse(bp);
@@ -1254,8 +1251,11 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
  * The SB_LOCK must be held when this routine is called.
  */
 int
-xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
-                       int delta, int rsvd)
+xfs_mod_incore_sb_unlocked(
+       xfs_mount_t     *mp,
+       xfs_sb_field_t  field,
+       int64_t         delta,
+       int             rsvd)
 {
        int             scounter;       /* short counter for 32 bit fields */
        long long       lcounter;       /* long counter for 64 bit fields */
@@ -1287,7 +1287,6 @@ xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
                mp->m_sb.sb_ifree = lcounter;
                return 0;
        case XFS_SBS_FDBLOCKS:
-
                lcounter = (long long)
                        mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
                res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
@@ -1418,7 +1417,11 @@ xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
  * routine to do the work.
  */
 int
-xfs_mod_incore_sb(xfs_mount_t *mp, xfs_sb_field_t field, int delta, int rsvd)
+xfs_mod_incore_sb(
+       xfs_mount_t     *mp,
+       xfs_sb_field_t  field,
+       int64_t         delta,
+       int             rsvd)
 {
        unsigned long   s;
        int     status;
@@ -1736,17 +1739,17 @@ xfs_icsb_cpu_notify(
                memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
                break;
        case CPU_ONLINE:
-               mutex_lock(&mp->m_icsb_mutex);
+               xfs_icsb_lock(mp);
                xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
                xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
                xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
-               mutex_unlock(&mp->m_icsb_mutex);
+               xfs_icsb_unlock(mp);
                break;
        case CPU_DEAD:
                /* Disable all the counters, then fold the dead cpu's
                 * count into the total on the global superblock and
                 * re-enable the counters. */
-               mutex_lock(&mp->m_icsb_mutex);
+               xfs_icsb_lock(mp);
                s = XFS_SB_LOCK(mp);
                xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
                xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
@@ -1765,7 +1768,7 @@ xfs_icsb_cpu_notify(
                xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS,
                                         XFS_ICSB_SB_LOCKED, 0);
                XFS_SB_UNLOCK(mp, s);
-               mutex_unlock(&mp->m_icsb_mutex);
+               xfs_icsb_unlock(mp);
                break;
        }
 
@@ -1805,6 +1808,22 @@ xfs_icsb_init_counters(
        return 0;
 }
 
+void
+xfs_icsb_reinit_counters(
+	xfs_mount_t	*mp)
+{
+	xfs_icsb_lock(mp);
+	/*
+	 * Start with every counter disabled so that the
+	 * initial balance calls below kick us off correctly.
+	 */
+	mp->m_icsb_counters = -1;
+	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
+	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
+	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
+	xfs_icsb_unlock(mp);
+}
+
 STATIC void
 xfs_icsb_destroy_counters(
        xfs_mount_t     *mp)
@@ -1813,6 +1832,7 @@ xfs_icsb_destroy_counters(
                unregister_hotcpu_notifier(&mp->m_icsb_notifier);
                free_percpu(mp->m_sb_cnts);
        }
+       mutex_destroy(&mp->m_icsb_mutex);
 }
 
 STATIC_INLINE void
@@ -1972,8 +1992,8 @@ xfs_icsb_enable_counter(
        xfs_icsb_unlock_all_counters(mp);
 }
 
-STATIC void
-xfs_icsb_sync_counters_int(
+void
+xfs_icsb_sync_counters_flags(
        xfs_mount_t     *mp,
        int             flags)
 {
@@ -2005,17 +2025,7 @@ STATIC void
 xfs_icsb_sync_counters(
        xfs_mount_t     *mp)
 {
-       xfs_icsb_sync_counters_int(mp, 0);
-}
-
-/*
- * lazy addition used for things like df, background sb syncs, etc
- */
-void
-xfs_icsb_sync_counters_lazy(
-       xfs_mount_t     *mp)
-{
-       xfs_icsb_sync_counters_int(mp, XFS_ICSB_LAZY_COUNT);
+       xfs_icsb_sync_counters_flags(mp, 0);
 }
 
 /*
@@ -2091,7 +2101,7 @@ int
 xfs_icsb_modify_counters(
        xfs_mount_t     *mp,
        xfs_sb_field_t  field,
-       int             delta,
+       int64_t         delta,
        int             rsvd)
 {
        xfs_icsb_cnts_t *icsbp;
@@ -2156,7 +2166,7 @@ slow_path:
         * the superblock lock. We still need to hold the superblock
         * lock, however, when we modify the global structures.
         */
-       mutex_lock(&mp->m_icsb_mutex);
+       xfs_icsb_lock(mp);
 
        /*
         * Now running atomically.
@@ -2165,7 +2175,7 @@ slow_path:
         * Drop the lock and try again in the fast path....
         */
        if (!(xfs_icsb_counter_disabled(mp, field))) {
-               mutex_unlock(&mp->m_icsb_mutex);
+               xfs_icsb_unlock(mp);
                goto again;
        }
 
@@ -2192,7 +2202,7 @@ slow_path:
         */
        if (ret != ENOSPC)
                xfs_icsb_balance_counter(mp, field, 0, 0);
-       mutex_unlock(&mp->m_icsb_mutex);
+       xfs_icsb_unlock(mp);
        return ret;
 
 balance_counter:
@@ -2205,7 +2215,7 @@ balance_counter:
         * do more balances than strictly necessary but it is not
         * the common slowpath case.
         */
-       mutex_lock(&mp->m_icsb_mutex);
+       xfs_icsb_lock(mp);
 
        /*
         * running atomically.
@@ -2216,7 +2226,7 @@ balance_counter:
         * another balance operation being required.
         */
        xfs_icsb_balance_counter(mp, field, 0, delta);
-       mutex_unlock(&mp->m_icsb_mutex);
+       xfs_icsb_unlock(mp);
        goto again;
 }