/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
STATIC void	xfs_mount_log_sbunit(xfs_mount_t *, __int64_t);
STATIC int	xfs_uuid_mount(xfs_mount_t *);
STATIC void	xfs_uuid_unmount(xfs_mount_t *mp);
STATIC void	xfs_unmountfs_wait(xfs_mount_t *);
#ifdef HAVE_PERCPU_SB
STATIC void	xfs_icsb_destroy_counters(xfs_mount_t *);
STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, int,
						int);
STATIC void	xfs_icsb_sync_counters(xfs_mount_t *);
STATIC int	xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
						int, int);
STATIC int	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);

#else

#define xfs_icsb_destroy_counters(mp)			do { } while (0)
#define xfs_icsb_balance_counter(mp, a, b, c)		do { } while (0)
#define xfs_icsb_sync_counters(mp)			do { } while (0)
#define xfs_icsb_modify_counters(mp, a, b, c)		do { } while (0)

#endif
static const struct {
	short offset;
	short type;	/* 0 = integer
			 * 1 = binary / string (no translation)
			 */
} xfs_sb_info[] = {
	{ offsetof(xfs_sb_t, sb_magicnum), 0 },
	{ offsetof(xfs_sb_t, sb_blocksize), 0 },
	{ offsetof(xfs_sb_t, sb_dblocks), 0 },
	{ offsetof(xfs_sb_t, sb_rblocks), 0 },
	{ offsetof(xfs_sb_t, sb_rextents), 0 },
	{ offsetof(xfs_sb_t, sb_uuid), 1 },
	{ offsetof(xfs_sb_t, sb_logstart), 0 },
	{ offsetof(xfs_sb_t, sb_rootino), 0 },
	{ offsetof(xfs_sb_t, sb_rbmino), 0 },
	{ offsetof(xfs_sb_t, sb_rsumino), 0 },
	{ offsetof(xfs_sb_t, sb_rextsize), 0 },
	{ offsetof(xfs_sb_t, sb_agblocks), 0 },
	{ offsetof(xfs_sb_t, sb_agcount), 0 },
	{ offsetof(xfs_sb_t, sb_rbmblocks), 0 },
	{ offsetof(xfs_sb_t, sb_logblocks), 0 },
	{ offsetof(xfs_sb_t, sb_versionnum), 0 },
	{ offsetof(xfs_sb_t, sb_sectsize), 0 },
	{ offsetof(xfs_sb_t, sb_inodesize), 0 },
	{ offsetof(xfs_sb_t, sb_inopblock), 0 },
	{ offsetof(xfs_sb_t, sb_fname[0]), 1 },
	{ offsetof(xfs_sb_t, sb_blocklog), 0 },
	{ offsetof(xfs_sb_t, sb_sectlog), 0 },
	{ offsetof(xfs_sb_t, sb_inodelog), 0 },
	{ offsetof(xfs_sb_t, sb_inopblog), 0 },
	{ offsetof(xfs_sb_t, sb_agblklog), 0 },
	{ offsetof(xfs_sb_t, sb_rextslog), 0 },
	{ offsetof(xfs_sb_t, sb_inprogress), 0 },
	{ offsetof(xfs_sb_t, sb_imax_pct), 0 },
	{ offsetof(xfs_sb_t, sb_icount), 0 },
	{ offsetof(xfs_sb_t, sb_ifree), 0 },
	{ offsetof(xfs_sb_t, sb_fdblocks), 0 },
	{ offsetof(xfs_sb_t, sb_frextents), 0 },
	{ offsetof(xfs_sb_t, sb_uquotino), 0 },
	{ offsetof(xfs_sb_t, sb_gquotino), 0 },
	{ offsetof(xfs_sb_t, sb_qflags), 0 },
	{ offsetof(xfs_sb_t, sb_flags), 0 },
	{ offsetof(xfs_sb_t, sb_shared_vn), 0 },
	{ offsetof(xfs_sb_t, sb_inoalignmt), 0 },
	{ offsetof(xfs_sb_t, sb_unit), 0 },
	{ offsetof(xfs_sb_t, sb_width), 0 },
	{ offsetof(xfs_sb_t, sb_dirblklog), 0 },
	{ offsetof(xfs_sb_t, sb_logsectlog), 0 },
	{ offsetof(xfs_sb_t, sb_logsectsize), 0 },
	{ offsetof(xfs_sb_t, sb_logsunit), 0 },
	{ offsetof(xfs_sb_t, sb_features2), 0 },
	{ sizeof(xfs_sb_t), 0 }
};
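/*
 * Illustrative note (sketch, not part of the build): xfs_xlatesb() below
 * derives each field's location and width from adjacent xfs_sb_info[]
 * entries, which is why the table ends with a sentinel entry holding
 * sizeof(xfs_sb_t).  For a field index f:
 *
 *	int first = xfs_sb_info[f].offset;
 *	int size  = xfs_sb_info[f + 1].offset - first;
 *
 * type == 1 (or size == 1) means a raw memcpy with no endian translation;
 * type == 0 means INT_XLATE() on a 2, 4 or 8 byte integer at 'first'.
 */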
/*
 * Return a pointer to an initialized xfs_mount structure.
 */
xfs_mount_t *
xfs_mount_init(void)
{
	xfs_mount_t	*mp;

	mp = kmem_zalloc(sizeof(xfs_mount_t), KM_SLEEP);

	if (xfs_icsb_init_counters(mp)) {
		mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
	}

	AIL_LOCKINIT(&mp->m_ail_lock, "xfs_ail");
	spinlock_init(&mp->m_sb_lock, "xfs_sb");
	mutex_init(&mp->m_ilock);
	initnsema(&mp->m_growlock, 1, "xfs_grow");
	/*
	 * Initialize the AIL.
	 */
	xfs_trans_ail_init(mp);

	atomic_set(&mp->m_active_trans, 0);

	return mp;
}
/*
 * Free up the resources associated with a mount structure.  Assume that
 * the structure was initially zeroed, so we can tell which fields got
 * initialized.
 */
void
xfs_mount_free(
	xfs_mount_t	*mp,
	int		remove_bhv)
{
	if (mp->m_ihash)
		xfs_ihash_free(mp);
	if (mp->m_chash)
		xfs_chash_free(mp);

	if (mp->m_perag) {
		int	agno;

		for (agno = 0; agno < mp->m_maxagi; agno++)
			if (mp->m_perag[agno].pagb_list)
				kmem_free(mp->m_perag[agno].pagb_list,
					  sizeof(xfs_perag_busy_t) *
						XFS_PAGB_NUM_SLOTS);
		kmem_free(mp->m_perag,
			  sizeof(xfs_perag_t) * mp->m_sb.sb_agcount);
	}

	AIL_LOCK_DESTROY(&mp->m_ail_lock);
	spinlock_destroy(&mp->m_sb_lock);
	mutex_destroy(&mp->m_ilock);
	freesema(&mp->m_growlock);

	if (mp->m_fsname != NULL)
		kmem_free(mp->m_fsname, mp->m_fsname_len);
	if (mp->m_rtname != NULL)
		kmem_free(mp->m_rtname, strlen(mp->m_rtname) + 1);
	if (mp->m_logname != NULL)
		kmem_free(mp->m_logname, strlen(mp->m_logname) + 1);

	if (remove_bhv) {
		struct bhv_vfs	*vfsp = XFS_MTOVFS(mp);

		bhv_remove_all_vfsops(vfsp, 0);
		VFS_REMOVEBHV(vfsp, &mp->m_bhv);
	}

	xfs_icsb_destroy_counters(mp);
	kmem_free(mp, sizeof(xfs_mount_t));
}
/*
 * Check the validity of the SB found.
 */
STATIC int
xfs_mount_validate_sb(
	xfs_mount_t	*mp,
	xfs_sb_t	*sbp,
	int		flags)
{
	/*
	 * If the log device and data device have the
	 * same device number, the log is internal.
	 * Consequently, the sb_logstart should be non-zero.  If
	 * we have a zero sb_logstart in this case, we may be trying to mount
	 * a volume filesystem in a non-volume manner.
	 */
	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
		xfs_fs_mount_cmn_err(flags, "bad magic number");
		return XFS_ERROR(EWRONGFS);
	}

	if (!XFS_SB_GOOD_VERSION(sbp)) {
		xfs_fs_mount_cmn_err(flags, "bad version");
		return XFS_ERROR(EWRONGFS);
	}

	if (unlikely(
	    sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
		xfs_fs_mount_cmn_err(flags,
			"filesystem is marked as having an external log; "
			"specify logdev on the\nmount command line.");
		return XFS_ERROR(EINVAL);
	}

	if (unlikely(
	    sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
		xfs_fs_mount_cmn_err(flags,
			"filesystem is marked as having an internal log; "
			"do not specify logdev on\nthe mount command line.");
		return XFS_ERROR(EINVAL);
	}
	/*
	 * More sanity checking.  These were stolen directly from
	 * xfs_repair.
	 */
	if (unlikely(
	    sbp->sb_agcount <= 0 ||
	    sbp->sb_sectsize < XFS_MIN_SECTORSIZE ||
	    sbp->sb_sectsize > XFS_MAX_SECTORSIZE ||
	    sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG ||
	    sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG ||
	    sbp->sb_blocksize < XFS_MIN_BLOCKSIZE ||
	    sbp->sb_blocksize > XFS_MAX_BLOCKSIZE ||
	    sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG ||
	    sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
	    sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
	    sbp->sb_inodesize > XFS_DINODE_MAX_SIZE ||
	    sbp->sb_inodelog < XFS_DINODE_MIN_LOG ||
	    sbp->sb_inodelog > XFS_DINODE_MAX_LOG ||
	    (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog) ||
	    (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) ||
	    (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) ||
	    (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */))) {
		xfs_fs_mount_cmn_err(flags, "SB sanity check 1 failed");
		return XFS_ERROR(EFSCORRUPTED);
	}
	/*
	 * Sanity check AG count, size fields against data size field
	 */
	if (unlikely(
	    sbp->sb_dblocks == 0 ||
	    sbp->sb_dblocks >
	     (xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks ||
	    sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) *
			      sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) {
		xfs_fs_mount_cmn_err(flags, "SB sanity check 2 failed");
		return XFS_ERROR(EFSCORRUPTED);
	}
	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
	ASSERT(sbp->sb_blocklog >= BBSHIFT);

#if XFS_BIG_BLKNOS	/* Limited by ULONG_MAX of page cache index */
	if (unlikely(
	    (sbp->sb_dblocks >> (PAGE_SHIFT - sbp->sb_blocklog)) > ULONG_MAX ||
	    (sbp->sb_rblocks >> (PAGE_SHIFT - sbp->sb_blocklog)) > ULONG_MAX)) {
#else			/* Limited by UINT_MAX of sectors */
	if (unlikely(
	    (sbp->sb_dblocks << (sbp->sb_blocklog - BBSHIFT)) > UINT_MAX ||
	    (sbp->sb_rblocks << (sbp->sb_blocklog - BBSHIFT)) > UINT_MAX)) {
#endif
		xfs_fs_mount_cmn_err(flags,
			"file system too large to be mounted on this system.");
		return XFS_ERROR(E2BIG);
	}

	if (unlikely(sbp->sb_inprogress)) {
		xfs_fs_mount_cmn_err(flags, "file system busy");
		return XFS_ERROR(EFSCORRUPTED);
	}
	/*
	 * Version 1 directory format has never worked on Linux.
	 */
	if (unlikely(!XFS_SB_VERSION_HASDIRV2(sbp))) {
		xfs_fs_mount_cmn_err(flags,
			"file system using version 1 directory format");
		return XFS_ERROR(ENOSYS);
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
		xfs_fs_mount_cmn_err(flags,
			"file system with blocksize %d bytes",
			sbp->sb_blocksize);
		xfs_fs_mount_cmn_err(flags,
			"only pagesize (%ld) or less will currently work.",
			PAGE_SIZE);
		return XFS_ERROR(ENOSYS);
	}

	return 0;
}
xfs_agnumber_t
xfs_initialize_perag(
	bhv_vfs_t	*vfs,
	xfs_mount_t	*mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index, max_metadata;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;
	xfs_ino_t	ino;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_ino_t	max_inum = XFS_MAXINUMBER_32;

	/* Check to see if the filesystem can overflow 32 bit inodes */
	agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/* Clear the mount flag if no inode can overflow 32 bits
	 * on this filesystem, or if specifically requested.
	 */
	if ((vfs->vfs_flag & VFS_32BITINODES) && ino > max_inum) {
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	} else {
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;
	}

	/* If we can overflow then set up the ag headers accordingly */
	if (mp->m_flags & XFS_MOUNT_32BITINODES) {
		/* Calculate how much should be reserved for inodes to
		 * meet the max inode percentage.
		 */
		if (mp->m_maxicount) {
			__uint64_t	icount;

			icount = sbp->sb_dblocks * sbp->sb_imax_pct;
			do_div(icount, 100);
			icount += sbp->sb_agblocks - 1;
			do_div(icount, sbp->sb_agblocks);
			max_metadata = icount;
		} else {
			max_metadata = agcount;
		}
		for (index = 0; index < agcount; index++) {
			ino = XFS_AGINO_TO_INO(mp, index, agino);
			if (ino > max_inum) {
				index++;
				break;
			}

			/* This ag is preferred for inodes */
			pag = &mp->m_perag[index];
			pag->pagi_inodeok = 1;
			if (index < max_metadata)
				pag->pagf_metadata = 1;
		}
	} else {
		/* Set up default behavior for smaller filesystems */
		for (index = 0; index < agcount; index++) {
			pag = &mp->m_perag[index];
			pag->pagi_inodeok = 1;
		}
	}
	return index;
}
/*
 * xfs_xlatesb
 *
 *	data	- on disk version of sb
 *	sb	- a superblock
 *	dir	- conversion direction: <0 - convert sb to buf
 *					>0 - convert buf to sb
 *	fields	- which fields to copy (bitmask)
 */
void
xfs_xlatesb(
	void		*data,
	xfs_sb_t	*sb,
	int		dir,
	__int64_t	fields)
{
	xfs_caddr_t	buf_ptr;
	xfs_caddr_t	mem_ptr;
	xfs_sb_field_t	f;
	int		first;
	int		size;

	buf_ptr = (xfs_caddr_t)data;
	mem_ptr = (xfs_caddr_t)sb;

	while (fields) {
		f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
		first = xfs_sb_info[f].offset;
		size = xfs_sb_info[f + 1].offset - first;

		ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);

		if (size == 1 || xfs_sb_info[f].type == 1) {
			if (dir > 0) {
				memcpy(mem_ptr + first, buf_ptr + first, size);
			} else {
				memcpy(buf_ptr + first, mem_ptr + first, size);
			}
		} else {
			switch (size) {
			case 2:
				INT_XLATE(*(__uint16_t*)(buf_ptr+first),
					  *(__uint16_t*)(mem_ptr+first),
					  dir, ARCH_CONVERT);
				break;
			case 4:
				INT_XLATE(*(__uint32_t*)(buf_ptr+first),
					  *(__uint32_t*)(mem_ptr+first),
					  dir, ARCH_CONVERT);
				break;
			case 8:
				INT_XLATE(*(__uint64_t*)(buf_ptr+first),
					  *(__uint64_t*)(mem_ptr+first),
					  dir, ARCH_CONVERT);
				break;
			default:
				ASSERT(0);
			}
		}

		fields &= ~(1LL << f);
	}
}
/*
 * xfs_readsb
 *
 * Does the initial read of the superblock.
 */
int
xfs_readsb(xfs_mount_t *mp, int flags)
{
	unsigned int	sector_size;
	unsigned int	extra_flags;
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp;
	int		error;

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * Allocate a (locked) buffer to hold the superblock.
	 * This will be kept around at all times to optimize
	 * access to the superblock.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
	extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED;

	bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
				BTOBB(sector_size), extra_flags);
	if (!bp || XFS_BUF_ISERROR(bp)) {
		xfs_fs_mount_cmn_err(flags, "SB read failed");
		error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
		goto fail;
	}
	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);

	/*
	 * Initialize the mount structure from the superblock.
	 * But first do some basic consistency checking.
	 */
	sbp = XFS_BUF_TO_SBP(bp);
	xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), 1, XFS_SB_ALL_BITS);

	error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags);
	if (error) {
		xfs_fs_mount_cmn_err(flags, "SB validate failed");
		goto fail;
	}

	/*
	 * We must be able to do sector-sized and sector-aligned IO.
	 */
	if (sector_size > mp->m_sb.sb_sectsize) {
		xfs_fs_mount_cmn_err(flags,
			"device supports only %u byte sectors (not %u)",
			sector_size, mp->m_sb.sb_sectsize);
		error = ENOSYS;
		goto fail;
	}

	/*
	 * If device sector size is smaller than the superblock size,
	 * re-read the superblock so the buffer is correctly sized.
	 */
	if (sector_size < mp->m_sb.sb_sectsize) {
		XFS_BUF_UNMANAGE(bp);
		xfs_buf_relse(bp);
		sector_size = mp->m_sb.sb_sectsize;
		bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
					BTOBB(sector_size), extra_flags);
		if (!bp || XFS_BUF_ISERROR(bp)) {
			xfs_fs_mount_cmn_err(flags, "SB re-read failed");
			error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
			goto fail;
		}
		ASSERT(XFS_BUF_ISBUSY(bp));
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
	}

	mutex_lock(&mp->m_icsb_mutex);
	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
	mutex_unlock(&mp->m_icsb_mutex);

	mp->m_sb_bp = bp;
	xfs_buf_relse(bp);
	ASSERT(XFS_BUF_VALUSEMA(bp) > 0);
	return 0;

 fail:
	if (bp) {
		XFS_BUF_UNMANAGE(bp);
		xfs_buf_relse(bp);
	}
	return error;
}
/*
 * xfs_mount_common
 *
 * Mount initialization code establishing various mount
 * fields from the superblock associated with the given
 * mount structure.
 */
STATIC void
xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
{
	int	i;

	mp->m_agfrotor = mp->m_agirotor = 0;
	spinlock_init(&mp->m_agirotor_lock, "m_agirotor_lock");
	mp->m_maxagi = mp->m_sb.sb_agcount;
	mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
	mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
	mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
	mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
	mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
	mp->m_litino = sbp->sb_inodesize -
		((uint)sizeof(xfs_dinode_core_t) + (uint)sizeof(xfs_agino_t));
	mp->m_blockmask = sbp->sb_blocksize - 1;
	mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
	mp->m_blockwmask = mp->m_blockwsize - 1;
	INIT_LIST_HEAD(&mp->m_del_inodes);

	/*
	 * Setup for attributes, in case they get created.
	 * This value is for inodes getting attributes for the first time,
	 * the per-inode value is for old attribute values.
	 */
	ASSERT(sbp->sb_inodesize >= 256 && sbp->sb_inodesize <= 2048);
	switch (sbp->sb_inodesize) {
	case 256:
		mp->m_attroffset = XFS_LITINO(mp) -
				   XFS_BMDR_SPACE_CALC(MINABTPTRS);
		break;
	case 512:
	case 1024:
	case 2048:
		mp->m_attroffset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
		break;
	default:
		ASSERT(0);
	}
	ASSERT(mp->m_attroffset < XFS_LITINO(mp));

	for (i = 0; i < 2; i++) {
		mp->m_alloc_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
			xfs_alloc, i == 0);
		mp->m_alloc_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
			xfs_alloc, i == 0);
	}
	for (i = 0; i < 2; i++) {
		mp->m_bmap_dmxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
			xfs_bmbt, i == 0);
		mp->m_bmap_dmnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
			xfs_bmbt, i == 0);
	}
	for (i = 0; i < 2; i++) {
		mp->m_inobt_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
			xfs_inobt, i == 0);
		mp->m_inobt_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
			xfs_inobt, i == 0);
	}

	mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
	mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
					sbp->sb_inopblock);
	mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
}
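/*
 * Illustrative note (assumed example values, not from any superblock):
 * for a 4096-byte block filesystem sb_blocklog is 12, so with
 * XFS_NBBYLOG == 3 (log2 of bits per byte) and BBSHIFT == 9 (512-byte
 * basic blocks) the fields above work out to m_blkbit_log = 15 (bits
 * per block) and m_blkbb_log = 3 (basic blocks per block:
 * 4096 / 512 = 8 = 1 << 3).
 */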
/*
 * xfs_mountfs
 *
 * This function does the following on an initial mount of a file system:
 *	- reads the superblock from disk and init the mount struct
 *	- if we're a 32-bit kernel, do a size check on the superblock
 *		so we don't mount terabyte filesystems
 *	- init mount struct realtime fields
 *	- allocate inode hash table for fs
 *	- init directory manager
 *	- perform recovery and init the log manager
 */
int
xfs_mountfs(
	bhv_vfs_t	*vfsp,
	xfs_mount_t	*mp,
	int		mfsi_flags)
{
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp = &(mp->m_sb);
	xfs_inode_t	*rip;
	bhv_vnode_t	*rvp = NULL;
	int		readio_log, writeio_log;
	xfs_daddr_t	d;
	__uint64_t	ret64;
	__int64_t	update_flags;
	uint		quotamount, quotaflags;
	int		agno;
	int		uuid_mounted = 0;
	int		error = 0;

	if (mp->m_sb_bp == NULL) {
		if ((error = xfs_readsb(mp, mfsi_flags))) {
			return error;
		}
	}
	xfs_mount_common(mp, sbp);
	/*
	 * Check if sb_agblocks is aligned at stripe boundary.
	 * If sb_agblocks is NOT aligned turn off m_dalign since
	 * allocator alignment is within an ag, therefore ag has
	 * to be aligned at stripe boundary.
	 */
	update_flags = 0LL;
	if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) {
		/*
		 * If stripe unit and stripe width are not multiples
		 * of the fs blocksize turn off alignment.
		 */
		if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
		    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
			if (mp->m_flags & XFS_MOUNT_RETERR) {
				cmn_err(CE_WARN,
					"XFS: alignment check 1 failed");
				error = XFS_ERROR(EINVAL);
				goto error1;
			}
			mp->m_dalign = mp->m_swidth = 0;
		} else {
			/*
			 * Convert the stripe unit and width to FSBs.
			 */
			mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
			if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
				if (mp->m_flags & XFS_MOUNT_RETERR) {
					error = XFS_ERROR(EINVAL);
					goto error1;
				}
				xfs_fs_cmn_err(CE_WARN, mp,
"stripe alignment turned off: sunit(%d)/swidth(%d) incompatible with agsize(%d)",
					mp->m_dalign, mp->m_swidth,
					sbp->sb_agblocks);

				mp->m_dalign = 0;
				mp->m_swidth = 0;
			} else if (mp->m_dalign) {
				mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
			} else {
				if (mp->m_flags & XFS_MOUNT_RETERR) {
					xfs_fs_cmn_err(CE_WARN, mp,
"stripe alignment turned off: sunit(%d) less than bsize(%d)",
						mp->m_dalign,
						mp->m_blockmask + 1);
					error = XFS_ERROR(EINVAL);
					goto error1;
				}
				mp->m_swidth = 0;
			}
		}

		/*
		 * Update superblock with new values
		 * and log changes.
		 */
		if (XFS_SB_VERSION_HASDALIGN(sbp)) {
			if (sbp->sb_unit != mp->m_dalign) {
				sbp->sb_unit = mp->m_dalign;
				update_flags |= XFS_SB_UNIT;
			}
			if (sbp->sb_width != mp->m_swidth) {
				sbp->sb_width = mp->m_swidth;
				update_flags |= XFS_SB_WIDTH;
			}
		}
	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
		    XFS_SB_VERSION_HASDALIGN(&mp->m_sb)) {
		mp->m_dalign = sbp->sb_unit;
		mp->m_swidth = sbp->sb_width;
	}
	xfs_alloc_compute_maxlevels(mp);
	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
	xfs_ialloc_compute_maxlevels(mp);

	if (sbp->sb_imax_pct) {
		__uint64_t	icount;

		/* Make sure the maximum inode count is a multiple of the
		 * units we allocate inodes in.
		 */
		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		do_div(icount, mp->m_ialloc_blks);
		mp->m_maxicount = (icount * mp->m_ialloc_blks) <<
				   sbp->sb_inopblog;
	} else {
		mp->m_maxicount = 0;
	}

	mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog);

	/*
	 * XFS uses the uuid from the superblock as the unique
	 * identifier for fsid.  We can not use the uuid from the volume
	 * since a single partition filesystem is identical to a single
	 * partition volume/filesystem.
	 */
	if ((mfsi_flags & XFS_MFSI_SECOND) == 0 &&
	    (mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
		if (xfs_uuid_mount(mp)) {
			error = XFS_ERROR(EINVAL);
			goto error1;
		}
		uuid_mounted = 1;
		ret64 = uuid_hash64(&sbp->sb_uuid);
		memcpy(&vfsp->vfs_fsid, &ret64, sizeof(ret64));
	}
	/*
	 * Set the default minimum read and write sizes unless
	 * already specified in a mount option.
	 * We use smaller I/O sizes when the file system
	 * is being used for NFS service (wsync mount option).
	 */
	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
		if (mp->m_flags & XFS_MOUNT_WSYNC) {
			readio_log = XFS_WSYNC_READIO_LOG;
			writeio_log = XFS_WSYNC_WRITEIO_LOG;
		} else {
			readio_log = XFS_READIO_LOG_LARGE;
			writeio_log = XFS_WRITEIO_LOG_LARGE;
		}
	} else {
		readio_log = mp->m_readio_log;
		writeio_log = mp->m_writeio_log;
	}

	/*
	 * Set the number of readahead buffers to use based on
	 * physical memory size.
	 */
	if (xfs_physmem <= 4096)		/* <= 16MB */
		mp->m_nreadaheads = XFS_RW_NREADAHEAD_16MB;
	else if (xfs_physmem <= 8192)		/* <= 32MB */
		mp->m_nreadaheads = XFS_RW_NREADAHEAD_32MB;
	else
		mp->m_nreadaheads = XFS_RW_NREADAHEAD_K32;
	if (sbp->sb_blocklog > readio_log) {
		mp->m_readio_log = sbp->sb_blocklog;
	} else {
		mp->m_readio_log = readio_log;
	}
	mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
	if (sbp->sb_blocklog > writeio_log) {
		mp->m_writeio_log = sbp->sb_blocklog;
	} else {
		mp->m_writeio_log = writeio_log;
	}
	mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
	/*
	 * Set the inode cluster size based on the physical memory
	 * size.  This may still be overridden by the file system
	 * block size if it is larger than the chosen cluster size.
	 */
	if (xfs_physmem <= btoc(32 * 1024 * 1024)) {	/* <= 32 MB */
		mp->m_inode_cluster_size = XFS_INODE_SMALL_CLUSTER_SIZE;
	} else {
		mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
	}

	/*
	 * Set whether we're using inode alignment.
	 */
	if (XFS_SB_VERSION_HASALIGN(&mp->m_sb) &&
	    mp->m_sb.sb_inoalignmt >=
	    XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
		mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
	else
		mp->m_inoalign_mask = 0;
	/*
	 * If we are using stripe alignment, check whether
	 * the stripe unit is a multiple of the inode alignment
	 */
	if (mp->m_dalign && mp->m_inoalign_mask &&
	    !(mp->m_dalign & mp->m_inoalign_mask))
		mp->m_sinoalign = mp->m_dalign;
	else
		mp->m_sinoalign = 0;
	/*
	 * Check that the data (and log if separate) are an ok size.
	 */
	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		cmn_err(CE_WARN, "XFS: size check 1 failed");
		error = XFS_ERROR(E2BIG);
		goto error1;
	}
	error = xfs_read_buf(mp, mp->m_ddev_targp,
			     d - XFS_FSS_TO_BB(mp, 1),
			     XFS_FSS_TO_BB(mp, 1), 0, &bp);
	if (!error) {
		xfs_buf_relse(bp);
	} else {
		cmn_err(CE_WARN, "XFS: size check 2 failed");
		if (error == ENOSPC) {
			error = XFS_ERROR(E2BIG);
		}
		goto error1;
	}

	if (((mfsi_flags & XFS_MFSI_CLIENT) == 0) &&
	    mp->m_logdev_targp != mp->m_ddev_targp) {
		d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
		if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
			cmn_err(CE_WARN, "XFS: size check 3 failed");
			error = XFS_ERROR(E2BIG);
			goto error1;
		}
		error = xfs_read_buf(mp, mp->m_logdev_targp,
				     d - XFS_FSB_TO_BB(mp, 1),
				     XFS_FSB_TO_BB(mp, 1), 0, &bp);
		if (!error) {
			xfs_buf_relse(bp);
		} else {
			cmn_err(CE_WARN, "XFS: size check 3 failed");
			if (error == ENOSPC) {
				error = XFS_ERROR(E2BIG);
			}
			goto error1;
		}
	}
	/*
	 * Initialize realtime fields in the mount structure
	 */
	if ((error = xfs_rtmount_init(mp))) {
		cmn_err(CE_WARN, "XFS: RT mount failed");
		goto error1;
	}

	/*
	 * For client case we are done now
	 */
	if (mfsi_flags & XFS_MFSI_CLIENT) {
		return 0;
	}

	/*
	 * Copies the low order bits of the timestamp and the randomly
	 * set "sequence" number out of a UUID.
	 */
	uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);

	/*
	 * The vfs structure needs to have a file system independent
	 * way of checking for the invariant file system ID.  Since it
	 * can't look at mount structures it has a pointer to the data
	 * in the mount structure.
	 *
	 * File systems that don't support user level file handles (i.e.
	 * all of them except for XFS) will leave vfs_altfsid as NULL.
	 */
	vfsp->vfs_altfsid = (xfs_fsid_t *)mp->m_fixedfsid;
	mp->m_dmevmask = 0;	/* not persistent; set after each mount */
	/*
	 * Initialize the attribute manager's entries.
	 */
	mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100;

	/*
	 * Initialize the precomputed transaction reservations values.
	 */
	xfs_trans_init(mp);

	/*
	 * Allocate and initialize the inode hash table for this
	 * file system.
	 */
	xfs_ihash_init(mp);
	xfs_chash_init(mp);

	/*
	 * Allocate and initialize the per-ag data.
	 */
	init_rwsem(&mp->m_peraglock);
	mp->m_perag =
		kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), KM_SLEEP);

	mp->m_maxagi = xfs_initialize_perag(vfsp, mp, sbp->sb_agcount);

	/*
	 * log's mount-time initialization.  Perform 1st part recovery if needed
	 */
	if (likely(sbp->sb_logblocks > 0)) {	/* check for volume case */
		error = xfs_log_mount(mp, mp->m_logdev_targp,
				      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
				      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
		if (error) {
			cmn_err(CE_WARN, "XFS: log mount failed");
			goto error2;
		}
	} else {	/* No log has been defined */
		cmn_err(CE_WARN, "XFS: no log defined");
		XFS_ERROR_REPORT("xfs_mountfs_int(1)", XFS_ERRLEVEL_LOW, mp);
		error = XFS_ERROR(EFSCORRUPTED);
		goto error2;
	}
	/*
	 * Get and sanity-check the root inode.
	 * Save the pointer to it in the mount structure.
	 */
	error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0);
	if (error) {
		cmn_err(CE_WARN, "XFS: failed to read root inode");
		goto error3;
	}

	ASSERT(rip != NULL);
	rvp = XFS_ITOV(rip);

	if (unlikely((rip->i_d.di_mode & S_IFMT) != S_IFDIR)) {
		cmn_err(CE_WARN, "XFS: corrupted root inode");
		cmn_err(CE_WARN, "Device %s - root %llu is not a directory",
			XFS_BUFTARG_NAME(mp->m_ddev_targp),
			(unsigned long long)rip->i_ino);
		xfs_iunlock(rip, XFS_ILOCK_EXCL);
		XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
				 mp);
		error = XFS_ERROR(EFSCORRUPTED);
		goto error4;
	}
	mp->m_rootip = rip;	/* save it */

	xfs_iunlock(rip, XFS_ILOCK_EXCL);
	/*
	 * Initialize realtime inode pointers in the mount structure
	 */
	if ((error = xfs_rtmount_inodes(mp))) {
		/*
		 * Free up the root inode.
		 */
		cmn_err(CE_WARN, "XFS: failed to read RT inodes");
		goto error4;
	}

	/*
	 * If fs is not mounted readonly, then update the superblock
	 * unit and width changes.
	 */
	if (update_flags && !(vfsp->vfs_flag & VFS_RDONLY))
		xfs_mount_log_sbunit(mp, update_flags);
	/*
	 * Initialise the XFS quota management subsystem for this mount
	 */
	if ((error = XFS_QM_INIT(mp, &quotamount, &quotaflags)))
		goto error4;

	/*
	 * Finish recovering the file system.  This part needed to be
	 * delayed until after the root and real-time bitmap inodes
	 * were consistently read in.
	 */
	error = xfs_log_mount_finish(mp, mfsi_flags);
	if (error) {
		cmn_err(CE_WARN, "XFS: log mount finish failed");
		goto error4;
	}

	/*
	 * Complete the quota initialisation, post-log-replay component.
	 */
	if ((error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags)))
		goto error4;

	return 0;

 error4:
	/*
	 * Free up the root inode.
	 */
	VN_RELE(rvp);
 error3:
	xfs_log_unmount_dealloc(mp);
 error2:
	for (agno = 0; agno < sbp->sb_agcount; agno++)
		if (mp->m_perag[agno].pagb_list)
			kmem_free(mp->m_perag[agno].pagb_list,
				  sizeof(xfs_perag_busy_t) * XFS_PAGB_NUM_SLOTS);
	kmem_free(mp->m_perag, sbp->sb_agcount * sizeof(xfs_perag_t));
	mp->m_perag = NULL;
	/* FALLTHROUGH */
 error1:
	if (uuid_mounted)
		xfs_uuid_unmount(mp);
	xfs_freesb(mp);
	return error;
}
/*
 * xfs_unmountfs
 *
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
int
xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
{
	struct bhv_vfs	*vfsp = XFS_MTOVFS(mp);
#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
	int64_t		fsid;
#endif

	XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING);

	/*
	 * Flush out the log synchronously so that we know for sure
	 * that nothing is pinned.  This is important because bflush()
	 * will skip pinned buffers.
	 */
	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);

	xfs_binval(mp->m_ddev_targp);
	if (mp->m_rtdev_targp) {
		xfs_binval(mp->m_rtdev_targp);
	}

	xfs_unmountfs_writesb(mp);

	xfs_unmountfs_wait(mp);			/* wait for async bufs */

	xfs_log_unmount(mp);			/* Done! No more fs ops. */

	xfs_freesb(mp);

	/*
	 * All inodes from this mount point should be freed.
	 */
	ASSERT(mp->m_inodes == NULL);

	xfs_unmountfs_close(mp, cr);
	if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0)
		xfs_uuid_unmount(mp);

#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
	/*
	 * clear all error tags on this filesystem
	 */
	memcpy(&fsid, &vfsp->vfs_fsid, sizeof(int64_t));
	xfs_errortag_clearall_umount(fsid, mp->m_fsname, 0);
#endif

	xfs_mount_free(mp, 1);
	return 0;
}
void
xfs_unmountfs_close(xfs_mount_t *mp, struct cred *cr)
{
	if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_free_buftarg(mp->m_logdev_targp, 1);
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp, 1);
	xfs_free_buftarg(mp->m_ddev_targp, 0);
}
STATIC void
xfs_unmountfs_wait(xfs_mount_t *mp)
{
	if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_wait_buftarg(mp->m_logdev_targp);
	if (mp->m_rtdev_targp)
		xfs_wait_buftarg(mp->m_rtdev_targp);
	xfs_wait_buftarg(mp->m_ddev_targp);
}
int
xfs_unmountfs_writesb(xfs_mount_t *mp)
{
	xfs_buf_t	*sbp;
	xfs_sb_t	*sb;
	int		error = 0;

	/*
	 * skip superblock write if fs is read-only, or
	 * if we are doing a forced umount.
	 */
	sbp = xfs_getsb(mp, 0);
	if (!(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY ||
		XFS_FORCED_SHUTDOWN(mp))) {

		xfs_icsb_sync_counters(mp);

		/*
		 * mark shared-readonly if desired
		 */
		sb = XFS_BUF_TO_SBP(sbp);
		if (mp->m_mk_sharedro) {
			if (!(sb->sb_flags & XFS_SBF_READONLY))
				sb->sb_flags |= XFS_SBF_READONLY;
			if (!XFS_SB_VERSION_HASSHARED(sb))
				XFS_SB_VERSION_ADDSHARED(sb);
			xfs_fs_cmn_err(CE_NOTE, mp,
				"Unmounting, marking shared read-only");
		}

		XFS_BUF_UNDONE(sbp);
		XFS_BUF_UNREAD(sbp);
		XFS_BUF_UNDELAYWRITE(sbp);
		XFS_BUF_WRITE(sbp);
		XFS_BUF_UNASYNC(sbp);
		ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp);
		xfsbdstrat(mp, sbp);
		/* Nevermind errors we might get here. */
		error = xfs_iowait(sbp);
		if (error)
			xfs_ioerror_alert("xfs_unmountfs_writesb",
					  mp, sbp, XFS_BUF_ADDR(sbp));
		if (error && mp->m_mk_sharedro)
			xfs_fs_cmn_err(CE_ALERT, mp,
				"Superblock write error detected while "
				"unmounting.  Filesystem may not be "
				"marked shared readonly");
	}
	xfs_buf_relse(sbp);
	return error;
}
/*
 * xfs_mod_sb() can be used to copy arbitrary changes to the
 * in-core superblock into the superblock buffer to be logged.
 * It does not provide the higher level of locking that is
 * needed to protect the in-core superblock from concurrent
 * access.
 */
void
xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
{
	xfs_buf_t	*bp;
	int		first;
	int		last;
	xfs_mount_t	*mp;
	xfs_sb_t	*sbp;
	xfs_sb_field_t	f;

	ASSERT(fields);
	if (!fields)
		return;
	mp = tp->t_mountp;
	bp = xfs_trans_getsb(tp, mp, 0);
	sbp = XFS_BUF_TO_SBP(bp);
	first = sizeof(xfs_sb_t);
	last = 0;

	/* translate/copy */

	xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), -1, fields);

	/* find modified range */

	f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
	first = xfs_sb_info[f].offset;

	f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
	last = xfs_sb_info[f + 1].offset - 1;

	xfs_trans_log_buf(tp, bp, first, last);
}
/*
 * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
 * a delta to a specified field in the in-core superblock.  Simply
 * switch on the field indicated and apply the delta to that field.
 * Fields are not allowed to dip below zero, so if the delta would
 * do this do not apply it and return EINVAL.
 *
 * The SB_LOCK must be held when this routine is called.
 */
STATIC int
xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
			int delta, int rsvd)
{
	int		scounter;	/* short counter for 32 bit fields */
	long long	lcounter;	/* long counter for 64 bit fields */
	long long	res_used, rem;

	/*
	 * With the in-core superblock spin lock held, switch
	 * on the indicated field.  Apply the delta to the
	 * proper field.  If the field's value would dip below
	 * 0, then do not apply the delta and return EINVAL.
	 */
	switch (field) {
	case XFS_SBS_ICOUNT:
		lcounter = (long long)mp->m_sb.sb_icount;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_icount = lcounter;
		return 0;
	case XFS_SBS_IFREE:
		lcounter = (long long)mp->m_sb.sb_ifree;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_ifree = lcounter;
		return 0;
	case XFS_SBS_FDBLOCKS:
		lcounter = (long long)
			mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);

		if (delta > 0) {		/* Putting blocks back */
			if (res_used > delta) {
				mp->m_resblks_avail += delta;
			} else {
				rem = delta - res_used;
				mp->m_resblks_avail = mp->m_resblks;
				lcounter += rem;
			}
		} else {			/* Taking blocks away */
			lcounter += delta;

			/*
			 * If we're out of blocks, use any available reserved
			 * blocks if we're allowed to.
			 */
			if (lcounter < 0) {
				if (rsvd) {
					lcounter = (long long)
						mp->m_resblks_avail + delta;
					if (lcounter < 0) {
						return XFS_ERROR(ENOSPC);
					}
					mp->m_resblks_avail = lcounter;
					return 0;
				} else {	/* not reserved */
					return XFS_ERROR(ENOSPC);
				}
			}
		}

		mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
		return 0;
	case XFS_SBS_FREXTENTS:
		lcounter = (long long)mp->m_sb.sb_frextents;
		lcounter += delta;
		if (lcounter < 0) {
			return XFS_ERROR(ENOSPC);
		}
		mp->m_sb.sb_frextents = lcounter;
		return 0;
	case XFS_SBS_DBLOCKS:
		lcounter = (long long)mp->m_sb.sb_dblocks;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_dblocks = lcounter;
		return 0;
	case XFS_SBS_AGCOUNT:
		scounter = mp->m_sb.sb_agcount;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_agcount = scounter;
		return 0;
	case XFS_SBS_IMAX_PCT:
		scounter = mp->m_sb.sb_imax_pct;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_imax_pct = scounter;
		return 0;
	case XFS_SBS_REXTSIZE:
		scounter = mp->m_sb.sb_rextsize;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextsize = scounter;
		return 0;
	case XFS_SBS_RBMBLOCKS:
		scounter = mp->m_sb.sb_rbmblocks;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rbmblocks = scounter;
		return 0;
	case XFS_SBS_RBLOCKS:
		lcounter = (long long)mp->m_sb.sb_rblocks;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rblocks = lcounter;
		return 0;
	case XFS_SBS_REXTENTS:
		lcounter = (long long)mp->m_sb.sb_rextents;
		lcounter += delta;
		if (lcounter < 0) {
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextents = lcounter;
		return 0;
	case XFS_SBS_REXTSLOG:
		scounter = mp->m_sb.sb_rextslog;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextslog = scounter;
		return 0;
	default:
		ASSERT(0);
		return XFS_ERROR(EINVAL);
	}
}
/*
 * xfs_mod_incore_sb() is used to change a field in the in-core
 * superblock structure by the specified delta.  This modification
 * is protected by the SB_LOCK.  Just use the xfs_mod_incore_sb_unlocked()
 * routine to do the work.
 */
int
xfs_mod_incore_sb(xfs_mount_t *mp, xfs_sb_field_t field, int delta, int rsvd)
{
	unsigned long	s;
	int		status;

	/* check for per-cpu counters */
	switch (field) {
#ifdef HAVE_PERCPU_SB
	case XFS_SBS_ICOUNT:
	case XFS_SBS_IFREE:
	case XFS_SBS_FDBLOCKS:
		if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
			status = xfs_icsb_modify_counters(mp, field,
							delta, rsvd);
			break;
		}
		/* FALLTHROUGH */
#endif
	default:
		s = XFS_SB_LOCK(mp);
		status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
		XFS_SB_UNLOCK(mp, s);
		break;
	}

	return status;
}
/*
 * xfs_mod_incore_sb_batch() is used to change more than one field
 * in the in-core superblock structure at a time.  This modification
 * is protected by a lock internal to this module.  The fields and
 * changes to those fields are specified in the array of xfs_mod_sb
 * structures passed in.
 *
 * Either all of the specified deltas will be applied or none of
 * them will.  If any modified field dips below 0, then all modifications
 * will be backed out and EINVAL will be returned.
 */
int
xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
{
	unsigned long	s;
	int		status = 0;
	xfs_mod_sb_t	*msbp;

	/*
	 * Loop through the array of mod structures and apply each
	 * individually.  If any fail, then back out all those
	 * which have already been applied.  Do all of this within
	 * the scope of the SB_LOCK so that all of the changes will
	 * be atomic.
	 */
	s = XFS_SB_LOCK(mp);
	for (msbp = &msb[0]; msbp < (msb + nmsb); msbp++) {
		/*
		 * Apply the delta at index n.  If it fails, break
		 * from the loop so we'll fall into the undo loop
		 * below.
		 */
		switch (msbp->msb_field) {
#ifdef HAVE_PERCPU_SB
		case XFS_SBS_ICOUNT:
		case XFS_SBS_IFREE:
		case XFS_SBS_FDBLOCKS:
			if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
				XFS_SB_UNLOCK(mp, s);
				status = xfs_icsb_modify_counters(mp,
							msbp->msb_field,
							msbp->msb_delta, rsvd);
				s = XFS_SB_LOCK(mp);
				break;
			}
			/* FALLTHROUGH */
#endif
		default:
			status = xfs_mod_incore_sb_unlocked(mp,
						msbp->msb_field,
						msbp->msb_delta, rsvd);
			break;
		}

		if (status != 0)
			break;
	}

	/*
	 * If we didn't complete the loop above, then back out
	 * any changes made to the superblock.  If you add code
	 * between the loop above and here, make sure that you
	 * preserve the value of status.  Loop back until
	 * we step below the beginning of the array.  Make sure
	 * we don't touch anything back there.
	 */
	if (status != 0) {
		msbp--;
		while (msbp >= msb) {
			switch (msbp->msb_field) {
#ifdef HAVE_PERCPU_SB
			case XFS_SBS_ICOUNT:
			case XFS_SBS_IFREE:
			case XFS_SBS_FDBLOCKS:
				if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
					XFS_SB_UNLOCK(mp, s);
					status = xfs_icsb_modify_counters(mp,
							msbp->msb_field,
							-(msbp->msb_delta),
							rsvd);
					s = XFS_SB_LOCK(mp);
					break;
				}
				/* FALLTHROUGH */
#endif
			default:
				status = xfs_mod_incore_sb_unlocked(mp,
							msbp->msb_field,
							-(msbp->msb_delta),
							rsvd);
				break;
			}
			ASSERT(status == 0);
			msbp--;
		}
	}
	XFS_SB_UNLOCK(mp, s);
	return status;
}
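/*
 * Illustrative caller sketch (hypothetical deltas, not part of the build):
 * because the deltas are applied all-or-nothing, a caller can adjust
 * several counters at once and rely on the automatic undo above if any
 * one of them would dip below zero:
 *
 *	xfs_mod_sb_t	msb[2];
 *
 *	msb[0].msb_field = XFS_SBS_ICOUNT;	// new inode chunk
 *	msb[0].msb_delta = 64;
 *	msb[1].msb_field = XFS_SBS_IFREE;
 *	msb[1].msb_delta = 64;
 *	error = xfs_mod_incore_sb_batch(mp, msb, 2, 0);	// 0 = no rsvd pool
 */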
/*
 * xfs_getsb() is called to obtain the buffer for the superblock.
 * The buffer is returned locked and read in from disk.
 * The buffer should be released with a call to xfs_brelse().
 *
 * If the flags parameter is BUF_TRYLOCK, then we'll only return
 * the superblock buffer if it can be locked without sleeping.
 * If it can't then we'll return NULL.
 */
xfs_buf_t *
xfs_getsb(
	xfs_mount_t	*mp,
	int		flags)
{
	xfs_buf_t	*bp;

	ASSERT(mp->m_sb_bp != NULL);
	bp = mp->m_sb_bp;
	if (flags & XFS_BUF_TRYLOCK) {
		if (!XFS_BUF_CPSEMA(bp)) {
			return NULL;
		}
	} else {
		XFS_BUF_PSEMA(bp, PRIBIO);
	}
	XFS_BUF_HOLD(bp);
	ASSERT(XFS_BUF_ISDONE(bp));
	return bp;
}
/*
 * Used to free the superblock along various error paths.
 */
void
xfs_freesb(
	xfs_mount_t	*mp)
{
	xfs_buf_t	*bp;

	/*
	 * Use xfs_getsb() so that the buffer will be locked
	 * when we call xfs_buf_relse().
	 */
	bp = xfs_getsb(mp, 0);
	XFS_BUF_UNMANAGE(bp);
	xfs_buf_relse(bp);
	mp->m_sb_bp = NULL;
}
/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
	xfs_mount_t	*mp)
{
	if (uuid_is_nil(&mp->m_sb.sb_uuid)) {
		cmn_err(CE_WARN,
			"XFS: Filesystem %s has nil UUID - can't mount",
			mp->m_fsname);
		return -1;
	}
	if (!uuid_table_insert(&mp->m_sb.sb_uuid)) {
		cmn_err(CE_WARN,
			"XFS: Filesystem %s has duplicate UUID - can't mount",
			mp->m_fsname);
		return -1;
	}
	return 0;
}

/*
 * Remove filesystem from the UUID table.
 */
STATIC void
xfs_uuid_unmount(
	xfs_mount_t	*mp)
{
	uuid_table_remove(&mp->m_sb.sb_uuid);
}
/*
 * Used to log changes to the superblock unit and width fields which could
 * be altered by the mount options. Only the first superblock is updated.
 */
STATIC void
xfs_mount_log_sbunit(
	xfs_mount_t	*mp,
	__int64_t	fields)
{
	xfs_trans_t	*tp;

	ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID));

	tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
	if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
				XFS_DEFAULT_LOG_COUNT)) {
		xfs_trans_cancel(tp, 0);
		return;
	}
	xfs_mod_sb(tp, fields);
	xfs_trans_commit(tp, 0, NULL);
}
#ifdef HAVE_PERCPU_SB
/*
 * Per-cpu incore superblock counters
 *
 * Simple concept, difficult implementation
 *
 * Basically, replace the incore superblock counters with a distributed per cpu
 * counter for contended fields (e.g. free block count).
 *
 * Difficulties arise in that the incore sb is used for ENOSPC checking, and
 * hence needs to be accurately read when we are running low on space. Hence
 * there is a method to enable and disable the per-cpu counters based on how
 * much "stuff" is available in them.
 *
 * Basically, a counter is enabled if there is enough free resource to justify
 * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local
 * ENOSPC), then we disable the counters to synchronise all callers and
 * re-distribute the available resources.
 *
 * If, once we redistributed the available resources, we still get a failure,
 * we disable the per-cpu counter and go through the slow path.
 *
 * The slow path is the current xfs_mod_incore_sb() function.  This means that
 * when we disable a per-cpu counter, we need to drain its resources back to
 * the global superblock. We do this after disabling the counter to prevent
 * more threads from queueing up on the counter.
 *
 * Essentially, this means that we still need a lock in the fast path to enable
 * synchronisation between the global counters and the per-cpu counters. This
 * is not a problem because the lock will be local to a CPU almost all the time
 * and have little contention except when we get to ENOSPC conditions.
 *
 * Basically, this lock becomes a barrier that enables us to lock out the fast
 * path while we do things like enabling and disabling counters and
 * synchronising the counters.
 *
 * Locking rules:
 *
 *	1. XFS_SB_LOCK() before picking up per-cpu locks
 *	2. per-cpu locks always picked up via for_each_online_cpu() order
 *	3. accurate counter sync requires XFS_SB_LOCK + per cpu locks
 *	4. modifying per-cpu counters requires holding per-cpu lock
 *	5. modifying global counters requires holding XFS_SB_LOCK
 *	6. enabling or disabling a counter requires holding the XFS_SB_LOCK
 *	   and _none_ of the per-cpu locks.
 *
 * Disabled counters are only ever re-enabled by a balance operation
 * that results in more free resources per CPU than a given threshold.
 * To ensure counters don't remain disabled, they are rebalanced when
 * the global resource goes above a higher threshold (i.e. some hysteresis
 * is present to prevent thrashing).
 */
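/*
 * Worked example (illustrative numbers only): with 4 online CPUs and
 * sb_fdblocks = 4096, a balance hands each CPU 4096/4 = 1024 blocks
 * (the first CPU also receives the division residual), which clears the
 * XFS_ICSB_FDBLK_CNTR_REENABLE threshold below (512 plus the allocator
 * set-aside), so the counter is re-enabled.  With sb_fdblocks = 1024
 * each CPU would get only 256, the threshold is not met, and callers
 * keep using the locked global counter until more space is freed.
 */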
#ifdef CONFIG_HOTPLUG_CPU
/*
 * hot-plug CPU notifier support.
 *
 * We need a notifier per filesystem as we need to be able to identify
 * the filesystem to balance the counters out. This is achieved by
 * having a notifier block embedded in the xfs_mount_t and doing pointer
 * magic to get the mount pointer from the notifier block address.
 */
STATIC int
xfs_icsb_cpu_notify(
	struct notifier_block *nfb,
	unsigned long action,
	void *hcpu)
{
	xfs_icsb_cnts_t *cntp;
	xfs_mount_t	*mp;
	int		s;

	mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
	cntp = (xfs_icsb_cnts_t *)
			per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
	switch (action) {
	case CPU_UP_PREPARE:
		/* Easy Case - initialize the area and locks, and
		 * then rebalance when online does everything else for us. */
		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
		break;
	case CPU_ONLINE:
		mutex_lock(&mp->m_icsb_mutex);
		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
		mutex_unlock(&mp->m_icsb_mutex);
		break;
	case CPU_DEAD:
		/* Disable all the counters, then fold the dead cpu's
		 * count into the total on the global superblock and
		 * re-enable the counters. */
		mutex_lock(&mp->m_icsb_mutex);
		s = XFS_SB_LOCK(mp);
		xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
		xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
		xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);

		mp->m_sb.sb_icount += cntp->icsb_icount;
		mp->m_sb.sb_ifree += cntp->icsb_ifree;
		mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;

		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));

		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT,
					 XFS_ICSB_SB_LOCKED, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE,
					 XFS_ICSB_SB_LOCKED, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS,
					 XFS_ICSB_SB_LOCKED, 0);
		XFS_SB_UNLOCK(mp, s);
		mutex_unlock(&mp->m_icsb_mutex);
		break;
	}

	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
int
xfs_icsb_init_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
	if (mp->m_sb_cnts == NULL)
		return -ENOMEM;

#ifdef CONFIG_HOTPLUG_CPU
	mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
	mp->m_icsb_notifier.priority = 0;
	register_hotcpu_notifier(&mp->m_icsb_notifier);
#endif /* CONFIG_HOTPLUG_CPU */

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
	}

	mutex_init(&mp->m_icsb_mutex);

	/*
	 * start with all counters disabled so that the
	 * initial balance kicks us off correctly
	 */
	mp->m_icsb_counters = -1;
	return 0;
}
STATIC void
xfs_icsb_destroy_counters(
	xfs_mount_t	*mp)
{
	if (mp->m_sb_cnts) {
		unregister_hotcpu_notifier(&mp->m_icsb_notifier);
		free_percpu(mp->m_sb_cnts);
	}
}
STATIC void
xfs_icsb_lock_cntr(
	xfs_icsb_cnts_t	*icsbp)
{
	while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) {
		ndelay(1000);
	}
}

STATIC void
xfs_icsb_unlock_cntr(
	xfs_icsb_cnts_t	*icsbp)
{
	clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
}
STATIC void
xfs_icsb_lock_all_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		xfs_icsb_lock_cntr(cntp);
	}
}

STATIC void
xfs_icsb_unlock_all_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		xfs_icsb_unlock_cntr(cntp);
	}
}
STATIC void
xfs_icsb_count(
	xfs_mount_t	*mp,
	xfs_icsb_cnts_t	*cnt,
	int		flags)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	memset(cnt, 0, sizeof(xfs_icsb_cnts_t));

	if (!(flags & XFS_ICSB_LAZY_COUNT))
		xfs_icsb_lock_all_counters(mp);

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		cnt->icsb_icount += cntp->icsb_icount;
		cnt->icsb_ifree += cntp->icsb_ifree;
		cnt->icsb_fdblocks += cntp->icsb_fdblocks;
	}

	if (!(flags & XFS_ICSB_LAZY_COUNT))
		xfs_icsb_unlock_all_counters(mp);
}
STATIC int
xfs_icsb_counter_disabled(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field)
{
	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
	return test_bit(field, &mp->m_icsb_counters);
}
STATIC int
xfs_icsb_disable_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field)
{
	xfs_icsb_cnts_t	cnt;

	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));

	/*
	 * If we are already disabled, then there is nothing to do
	 * here. We check before locking all the counters to avoid
	 * the expensive lock operation when being called in the
	 * slow path and the counter is already disabled. This is
	 * safe because the only time we set or clear this state is under
	 * the m_icsb_mutex.
	 */
	if (xfs_icsb_counter_disabled(mp, field))
		return 0;

	xfs_icsb_lock_all_counters(mp);
	if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
		/* drain back to superblock */

		xfs_icsb_count(mp, &cnt, XFS_ICSB_SB_LOCKED|XFS_ICSB_LAZY_COUNT);
		switch (field) {
		case XFS_SBS_ICOUNT:
			mp->m_sb.sb_icount = cnt.icsb_icount;
			break;
		case XFS_SBS_IFREE:
			mp->m_sb.sb_ifree = cnt.icsb_ifree;
			break;
		case XFS_SBS_FDBLOCKS:
			mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
			break;
		default:
			BUG();
		}
	}

	xfs_icsb_unlock_all_counters(mp);

	return 0;
}
STATIC void
xfs_icsb_enable_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	uint64_t	count,
	uint64_t	resid)
{
	xfs_icsb_cnts_t	*cntp;
	int		i;

	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));

	xfs_icsb_lock_all_counters(mp);
	for_each_online_cpu(i) {
		cntp = per_cpu_ptr(mp->m_sb_cnts, i);
		switch (field) {
		case XFS_SBS_ICOUNT:
			cntp->icsb_icount = count + resid;
			break;
		case XFS_SBS_IFREE:
			cntp->icsb_ifree = count + resid;
			break;
		case XFS_SBS_FDBLOCKS:
			cntp->icsb_fdblocks = count + resid;
			break;
		default:
			BUG();
			break;
		}
		resid = 0;
	}
	clear_bit(field, &mp->m_icsb_counters);
	xfs_icsb_unlock_all_counters(mp);
}
STATIC void
xfs_icsb_sync_counters_int(
	xfs_mount_t	*mp,
	int		flags)
{
	xfs_icsb_cnts_t	cnt;
	int		s;

	/* Pass 1: lock all counters */
	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
		s = XFS_SB_LOCK(mp);

	xfs_icsb_count(mp, &cnt, flags);

	/* Pass 2: update mp->m_sb fields */
	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
		mp->m_sb.sb_icount = cnt.icsb_icount;
	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
		mp->m_sb.sb_ifree = cnt.icsb_ifree;
	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
		mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;

	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
		XFS_SB_UNLOCK(mp, s);
}
/*
 * Accurate update of per-cpu counters to incore superblock
 */
STATIC void
xfs_icsb_sync_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_sync_counters_int(mp, 0);
}

/*
 * lazy addition used for things like df, background sb syncs, etc
 */
void
xfs_icsb_sync_counters_lazy(
	xfs_mount_t	*mp)
{
	xfs_icsb_sync_counters_int(mp, XFS_ICSB_LAZY_COUNT);
}
/*
 * Balance and enable/disable counters as necessary.
 *
 * Thresholds for re-enabling counters are somewhat magic.  inode counts are
 * chosen to be the same number as single on disk allocation chunk per CPU, and
 * free blocks is something far enough from zero that we aren't going to thrash
 * when we get near ENOSPC. We also need to supply a minimum we require per cpu
 * to prevent looping endlessly when xfs_alloc_space asks for more than will
 * be distributed to a single CPU but each CPU has enough blocks to be
 * reallocated.
 *
 * Note that we can be called when counters are already disabled.
 * xfs_icsb_disable_counter() optimises the counter locking in this case to
 * prevent locking every per-cpu counter needlessly.
 */
#define XFS_ICSB_INO_CNTR_REENABLE	(uint64_t)64
#define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
		(uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
STATIC void
xfs_icsb_balance_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int		flags,
	int		min_per_cpu)
{
	uint64_t	count, resid;
	int		weight = num_online_cpus();
	int		s;
	uint64_t	min = (uint64_t)min_per_cpu;

	if (!(flags & XFS_ICSB_SB_LOCKED))
		s = XFS_SB_LOCK(mp);

	/* disable counter and sync counter */
	xfs_icsb_disable_counter(mp, field);

	/* update counters - first CPU gets residual */
	switch (field) {
	case XFS_SBS_ICOUNT:
		count = mp->m_sb.sb_icount;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
			goto out;
		break;
	case XFS_SBS_IFREE:
		count = mp->m_sb.sb_ifree;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
			goto out;
		break;
	case XFS_SBS_FDBLOCKS:
		count = mp->m_sb.sb_fdblocks;
		resid = do_div(count, weight);
		if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
			goto out;
		break;
	default:
		BUG();
		count = resid = 0;	/* quiet, gcc */
		break;
	}

	xfs_icsb_enable_counter(mp, field, count, resid);
out:
	if (!(flags & XFS_ICSB_SB_LOCKED))
		XFS_SB_UNLOCK(mp, s);
}
STATIC int
xfs_icsb_modify_counters(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int		delta,
	int		rsvd)
{
	xfs_icsb_cnts_t	*icsbp;
	long long	lcounter;	/* long counter for 64 bit fields */
	int		cpu, ret = 0, s;

	might_sleep();
again:
	cpu = get_cpu();
	icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu);

	/*
	 * if the counter is disabled, go to slow path
	 */
	if (unlikely(xfs_icsb_counter_disabled(mp, field)))
		goto slow_path;
	xfs_icsb_lock_cntr(icsbp);
	if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
		xfs_icsb_unlock_cntr(icsbp);
		goto slow_path;
	}

	switch (field) {
	case XFS_SBS_ICOUNT:
		lcounter = icsbp->icsb_icount;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_icount = lcounter;
		break;

	case XFS_SBS_IFREE:
		lcounter = icsbp->icsb_ifree;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_ifree = lcounter;
		break;

	case XFS_SBS_FDBLOCKS:
		BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);

		lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto balance_counter;
		icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
		break;
	default:
		BUG();
		break;
	}
	xfs_icsb_unlock_cntr(icsbp);
	put_cpu();
	return 0;

slow_path:
	put_cpu();

	/*
	 * serialise with a mutex so we don't burn lots of cpu on
	 * the superblock lock. We still need to hold the superblock
	 * lock, however, when we modify the global structures.
	 */
	mutex_lock(&mp->m_icsb_mutex);

	/*
	 * Now running atomically.
	 *
	 * If the counter is enabled, someone has beaten us to rebalancing.
	 * Drop the lock and try again in the fast path....
	 */
	if (!(xfs_icsb_counter_disabled(mp, field))) {
		mutex_unlock(&mp->m_icsb_mutex);
		goto again;
	}

	/*
	 * The counter is currently disabled. Because we are
	 * running atomically here, we know a rebalance cannot
	 * be in progress. Hence we can go straight to operating
	 * on the global superblock. We do not call xfs_mod_incore_sb()
	 * here even though we need to get the SB_LOCK. Doing so
	 * will cause us to re-enter this function and deadlock.
	 * Hence we get the SB_LOCK ourselves and then call
	 * xfs_mod_incore_sb_unlocked() as the unlocked path operates
	 * directly on the global counters.
	 */
	s = XFS_SB_LOCK(mp);
	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
	XFS_SB_UNLOCK(mp, s);

	/*
	 * Now that we've modified the global superblock, we
	 * may be able to re-enable the distributed counters
	 * (e.g. lots of space just got freed). After that
	 * we are done.
	 */
	if (ret != ENOSPC)
		xfs_icsb_balance_counter(mp, field, 0, 0);
	mutex_unlock(&mp->m_icsb_mutex);
	return ret;

balance_counter:
	xfs_icsb_unlock_cntr(icsbp);
	put_cpu();

	/*
	 * We may have multiple threads here if multiple per-cpu
	 * counters run dry at the same time. This will mean we can
	 * do more balances than strictly necessary but it is not
	 * the common slowpath case.
	 */
	mutex_lock(&mp->m_icsb_mutex);

	/*
	 * Now running atomically.
	 *
	 * This will leave the counter in the correct state for future
	 * accesses. After the rebalance, we simply try again and our retry
	 * will either succeed through the fast path or slow path without
	 * another balance operation being required.
	 */
	xfs_icsb_balance_counter(mp, field, 0, delta);
	mutex_unlock(&mp->m_icsb_mutex);
	goto again;
}
#endif